author     David S. Miller <davem@davemloft.net>    2013-08-16 15:37:26 -0700
committer  David S. Miller <davem@davemloft.net>    2013-08-16 15:37:26 -0700
commit     2ff1cf12c9fe70e75e600404e6a4274b19d293ed (patch)
tree       beafddac0a8098e3f07d2ec60e44a2a7d006e605
parent     16b304f3404f8e0243d5ee2b70b68767b7b59b2b (diff)
parent     0f7dd1aa8f959216f1faa71513b9d3c1a9065e5a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
357 files changed, 3306 insertions(+), 1865 deletions(-)
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl index 6a8b7158697f..9c92bb879b6d 100644 --- a/Documentation/DocBook/media_api.tmpl +++ b/Documentation/DocBook/media_api.tmpl @@ -1,6 +1,6 @@ <?xml version="1.0"?> -<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" - "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [ +<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" + "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [ <!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities; <!ENTITY media-indices SYSTEM "./media-indices.tmpl"> diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt index a1ee681942cc..6113f9275f42 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt @@ -4,7 +4,7 @@ Required properties : - reg : Offset and length of the register set for the device - - compatible : Should be "marvell,mv64xxx-i2c" + - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c" - interrupts : The interrupt number Optional properties : diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt index d5a308629c57..30b0581bb1ce 100644 --- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt +++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt @@ -31,9 +31,8 @@ Optional nodes: Optional sub-node properties: ti,warm-reset - maintain voltage during warm reset(boolean) ti,roof-floor - control voltage selection by pin(boolean) - ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto, + ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto, 2 - eco, 3 - forced pwm - ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us ti,smps-range - OTP has the wrong range set for the hardware so override 0 - low range, 1 - high range. 
@@ -59,7 +58,6 @@ pmic { ti,warm-reset; ti,roof-floor; ti,mode-sleep = <0>; - ti,tstep = <0>; ti,smps-range = <1>; }; diff --git a/MAINTAINERS b/MAINTAINERS index 9ee0a8faa651..1c6f9db0de72 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE +M: Santosh Shilimkar <santosh.shilimkar@ti.com> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-keystone/ + ARM/LOGICPD PXA270 MACHINE SUPPORT M: Lennert Buytenhek <kernel@wantstofly.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -1259,7 +1265,6 @@ F: drivers/rtc/rtc-coh901331.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/Ux500 ARM ARCHITECTURE -M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> M: Linus Walleij <linus.walleij@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -5576,9 +5581,9 @@ S: Maintained F: drivers/media/tuners/mxl5007t.* MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) -M: Andrew Gallatin <gallatin@myri.com> +M: Hyong-Youb Kim <hykim@myri.com> L: netdev@vger.kernel.org -W: http://www.myri.com/scs/download-Myri10GE.html +W: https://www.myricom.com/support/downloads/myri10ge.html S: Supported F: drivers/net/ethernet/myricom/myri10ge/ @@ -8665,6 +8670,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git S: Maintained F: sound/usb/midi.* +USB NETWORKING DRIVERS +L: linux-usb@vger.kernel.org +S: Odd Fixes +F: drivers/net/usb/ + USB OHCI DRIVER M: Alan Stern <stern@rowland.harvard.edu> L: linux-usb@vger.kernel.org @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 11 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc5 NAME = Linux for Workgroups # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index 8d2ae24b9f4a..1feb169274fe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -407,6 +407,12 @@ config CLONE_BACKWARDS2 help Architecture has the first two arguments of clone(2) swapped. +config CLONE_BACKWARDS3 + bool + help + Architecture has tls passed as the 3rd argument of clone(2), + not the 5th one. 
+ config ODD_RT_SIGACTION bool help diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts index db2060c46540..9c1167b0459b 100644 --- a/arch/arm/boot/dts/msm8960-cdp.dts +++ b/arch/arm/boot/dts/msm8960-cdp.dts @@ -26,7 +26,7 @@ cpu-offset = <0x80000>; }; - msmgpio: gpio@fd510000 { + msmgpio: gpio@800000 { compatible = "qcom,msm-gpio"; gpio-controller; #gpio-cells = <2>; @@ -34,7 +34,7 @@ interrupts = <0 32 0x4>; interrupt-controller; #interrupt-cells = <2>; - reg = <0xfd510000 0x4000>; + reg = <0x800000 0x4000>; }; serial@16440000 { diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts index 08b72678abff..65d7b601651c 100644 --- a/arch/arm/boot/dts/omap5-uevm.dts +++ b/arch/arm/boot/dts/omap5-uevm.dts @@ -235,7 +235,7 @@ }; &mmc1 { - vmmc-supply = <&vmmcsd_fixed>; + vmmc-supply = <&ldo9_reg>; bus-width = <4>; }; @@ -282,6 +282,7 @@ regulators { smps123_reg: smps123 { + /* VDD_OPP_MPU */ regulator-name = "smps123"; regulator-min-microvolt = < 600000>; regulator-max-microvolt = <1500000>; @@ -290,6 +291,7 @@ }; smps45_reg: smps45 { + /* VDD_OPP_MM */ regulator-name = "smps45"; regulator-min-microvolt = < 600000>; regulator-max-microvolt = <1310000>; @@ -298,6 +300,7 @@ }; smps6_reg: smps6 { + /* VDD_DDR3 - over VDD_SMPS6 */ regulator-name = "smps6"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; @@ -306,6 +309,7 @@ }; smps7_reg: smps7 { + /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */ regulator-name = "smps7"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -314,6 +318,7 @@ }; smps8_reg: smps8 { + /* VDD_OPP_CORE */ regulator-name = "smps8"; regulator-min-microvolt = < 600000>; regulator-max-microvolt = <1310000>; @@ -322,15 +327,15 @@ }; smps9_reg: smps9 { + /* VDDA_2v1_AUD over VDD_2v1 */ regulator-name = "smps9"; regulator-min-microvolt = <2100000>; regulator-max-microvolt = <2100000>; - regulator-always-on; - regulator-boot-on; ti,smps-range = <0x80>; }; smps10_reg: smps10 { + /* VBUS_5V_OTG */ regulator-name = "smps10"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; @@ -339,38 +344,40 @@ }; ldo1_reg: ldo1 { + /* VDDAPHY_CAM: vdda_csiport */ regulator-name = "ldo1"; - regulator-min-microvolt = <2800000>; - regulator-max-microvolt = <2800000>; - regulator-always-on; - regulator-boot-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1800000>; }; ldo2_reg: ldo2 { + /* VCC_2V8_DISP: Does not go anywhere */ regulator-name = "ldo2"; - regulator-min-microvolt = <2900000>; - regulator-max-microvolt = <2900000>; - regulator-always-on; - regulator-boot-on; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + /* Unused */ + status = "disabled"; }; ldo3_reg: ldo3 { + /* VDDAPHY_MDM: vdda_lli */ regulator-name = "ldo3"; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; - regulator-always-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; regulator-boot-on; + /* Only if Modem is used */ + status = "disabled"; }; ldo4_reg: ldo4 { + /* VDDAPHY_DISP: vdda_dsiport/hdmi */ regulator-name = "ldo4"; - regulator-min-microvolt = <2200000>; - regulator-max-microvolt = <2200000>; - regulator-always-on; - regulator-boot-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1800000>; }; ldo5_reg: ldo5 { + /* VDDA_1V8_PHY: usb/sata/hdmi.. 
*/ regulator-name = "ldo5"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -379,38 +386,43 @@ }; ldo6_reg: ldo6 { + /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */ regulator-name = "ldo6"; - regulator-min-microvolt = <1500000>; - regulator-max-microvolt = <1500000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; regulator-always-on; regulator-boot-on; }; ldo7_reg: ldo7 { + /* VDD_VPP: vpp1 */ regulator-name = "ldo7"; - regulator-min-microvolt = <1500000>; - regulator-max-microvolt = <1500000>; - regulator-always-on; - regulator-boot-on; + regulator-min-microvolt = <2000000>; + regulator-max-microvolt = <2000000>; + /* Only for efuse reprograming! */ + status = "disabled"; }; ldo8_reg: ldo8 { + /* VDD_3v0: Does not go anywhere */ regulator-name = "ldo8"; - regulator-min-microvolt = <1500000>; - regulator-max-microvolt = <1500000>; - regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; regulator-boot-on; + /* Unused */ + status = "disabled"; }; ldo9_reg: ldo9 { + /* VCC_DV_SDIO: vdds_sdcard */ regulator-name = "ldo9"; regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; + regulator-max-microvolt = <3000000>; regulator-boot-on; }; ldoln_reg: ldoln { + /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */ regulator-name = "ldoln"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -419,12 +431,20 @@ }; ldousb_reg: ldousb { + /* VDDA_3V_USB: VDDA_USBHS33 */ regulator-name = "ldousb"; regulator-min-microvolt = <3250000>; regulator-max-microvolt = <3250000>; regulator-always-on; regulator-boot-on; }; + + regen3_reg: regen3 { + /* REGEN3 controls LDO9 supply to card */ + regulator-name = "regen3"; + regulator-always-on; + regulator-boot-on; + }; }; }; }; diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi index 7321403cab8a..f5b9898d9c6e 100644 --- a/arch/arm/boot/dts/stih41x.dtsi +++ b/arch/arm/boot/dts/stih41x.dtsi @@ -6,10 +6,12 @@ #address-cells = <1>; #size-cells = <0>; cpu@0 { + device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0>; }; cpu@1 { + device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <1>; }; diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi index 2fcb3f2ca160..5592be6f2f7a 100644 --- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi +++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi @@ -457,6 +457,7 @@ }; usb-phy@c5004000 { + status = "okay"; nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1) GPIO_ACTIVE_LOW>; }; diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 46e7cfb3e721..0baf7f0d9394 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -43,6 +43,7 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; struct vm_area_struct *vma; + unsigned long start, end; unsigned long range_start; unsigned long range_end; unsigned int nr; @@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->vma = NULL; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 
d9f5cd4e533f..21f77906602c 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) static int armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { - int mapping = (*event_map)[config]; + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -ENOENT; + + mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; } diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 614e41e7881b..905efc8cac79 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -121,8 +121,7 @@ config MSM_SMD bool config MSM_GPIOMUX - depends on !(ARCH_MSM8X60 || ARCH_MSM8960) - bool "MSM V1 TLMM GPIOMUX architecture" + bool help Support for MSM V1 TLMM GPIOMUX architecture. diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c deleted file mode 100644 index 27de2abd7144..000000000000 --- a/arch/arm/mach-msm/gpiomux-v1.c +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ -#include <linux/kernel.h> -#include "gpiomux.h" -#include "proc_comm.h" - -void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val) -{ - unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) | - ((gpio & 0x3ff) << 4); - unsigned tlmm_disable = 0; - int rc; - - rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, - &tlmm_config, &tlmm_disable); - if (rc) - pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n", - __func__, rc, tlmm_config, tlmm_disable); -} diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h index 8e82f41a8923..4410d7766f93 100644 --- a/arch/arm/mach-msm/gpiomux.h +++ b/arch/arm/mach-msm/gpiomux.h @@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS]; int msm_gpiomux_write(unsigned gpio, gpiomux_config_t active, gpiomux_config_t suspended); - -/* Architecture-internal function for use by the framework only. - * This function can assume the following: - * - the gpio value has passed a bounds-check - * - the gpiomux spinlock has been obtained - * - * This function is not for public consumption. External users - * should use msm_gpiomux_write. 
- */ -void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val); #else static inline int msm_gpiomux_write(unsigned gpio, gpiomux_config_t active, diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c index 393aeefaebb0..043e5705f2a6 100644 --- a/arch/arm/mach-omap2/dss-common.c +++ b/arch/arm/mach-omap2/dss-common.c @@ -42,7 +42,7 @@ /* Using generic display panel */ static struct tfp410_platform_data omap4_dvi_panel = { - .i2c_bus_num = 3, + .i2c_bus_num = 2, .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO, }; diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 5cc92874be7e..f99f68e1e85b 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; const char *oh_name; int oh_cnt, i, ret = 0; + bool device_active = false; oh_cnt = of_property_count_strings(node, "ti,hwmods"); if (oh_cnt <= 0) { @@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev) goto odbfd_exit1; } hwmods[i] = oh; + if (oh->flags & HWMOD_INIT_NO_IDLE) + device_active = true; } od = omap_device_alloc(pdev, hwmods, oh_cnt); @@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev) pdev->dev.pm_domain = &omap_device_pm_domain; + if (device_active) { + omap_device_enable(pdev); + pm_runtime_set_active(&pdev->dev); + } + odbfd_exit1: kfree(hwmods); odbfd_exit: @@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev); struct omap_device *od = to_omap_device(pdev); + int i; if (!od) return 0; @@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data) * If omap_device state is enabled, but has no driver bound, * idle it. */ + + /* + * Some devices (like memory controllers) are always kept + * enabled, and should not be idled even with no drivers. + */ + for (i = 0; i < od->hwmods_cnt; i++) + if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) + return 0; + if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { if (od->_state == OMAP_DEVICE_STATE_ENABLED) { dev_warn(dev, "%s: enabled but no driver. 
Idling\n", diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 7341eff63f56..7f4db12b1459 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data) np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); if (np) - va_start = of_iomap(np, 0); + va_start = of_iomap(np, oh->mpu_rt_idx); } else { va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); } diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index aab33fd814c0..e1482a9b3bc2 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3; #define MODULEMODE_HWCTRL 1 #define MODULEMODE_SWCTRL 2 +#define DEBUG_OMAP2UART1_FLAGS 0 +#define DEBUG_OMAP2UART2_FLAGS 0 +#define DEBUG_OMAP2UART3_FLAGS 0 +#define DEBUG_OMAP3UART3_FLAGS 0 +#define DEBUG_OMAP3UART4_FLAGS 0 +#define DEBUG_OMAP4UART3_FLAGS 0 +#define DEBUG_OMAP4UART4_FLAGS 0 +#define DEBUG_TI81XXUART1_FLAGS 0 +#define DEBUG_TI81XXUART2_FLAGS 0 +#define DEBUG_TI81XXUART3_FLAGS 0 +#define DEBUG_AM33XXUART1_FLAGS 0 + +#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET) + +#if defined(CONFIG_DEBUG_OMAP2UART1) +#undef DEBUG_OMAP2UART1_FLAGS +#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_OMAP2UART2) +#undef DEBUG_OMAP2UART2_FLAGS +#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_OMAP2UART3) +#undef DEBUG_OMAP2UART3_FLAGS +#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_OMAP3UART3) +#undef DEBUG_OMAP3UART3_FLAGS +#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_OMAP3UART4) +#undef DEBUG_OMAP3UART4_FLAGS +#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_OMAP4UART3) +#undef DEBUG_OMAP4UART3_FLAGS +#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_OMAP4UART4) +#undef DEBUG_OMAP4UART4_FLAGS +#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_TI81XXUART1) +#undef DEBUG_TI81XXUART1_FLAGS +#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_TI81XXUART2) +#undef DEBUG_TI81XXUART2_FLAGS +#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_TI81XXUART3) +#undef DEBUG_TI81XXUART3_FLAGS +#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS +#elif defined(CONFIG_DEBUG_AM33XXUART1) +#undef DEBUG_AM33XXUART1_FLAGS +#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS +#endif /** * struct omap_hwmod_mux_info - hwmod specific mux configuration @@ -568,6 +616,7 @@ struct omap_hwmod_link { * @voltdm: pointer to voltage domain (filled in at runtime) * @dev_attr: arbitrary device attributes that can be passed to the driver * @_sysc_cache: internal-use hwmod flags + * @mpu_rt_idx: index of device address space for register target (for DT boot) * @_mpu_rt_va: cached register target start address (internal use) * @_mpu_port: cached MPU register target slave (internal use) * @opt_clks_cnt: number of @opt_clks @@ -617,6 +666,7 @@ struct omap_hwmod { struct list_head node; struct omap_hwmod_ocp_if *_mpu_port; u16 flags; + u8 mpu_rt_idx; u8 response_lat; u8 rst_lines_cnt; u8 opt_clks_cnt; diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 
d05fc7b54567..56cebb05509e 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = { .mpu_irqs = omap2_uart1_mpu_irqs, .sdma_reqs = omap2_uart1_sdma_reqs, .main_clk = "uart1_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = { .mpu_irqs = omap2_uart2_mpu_irqs, .sdma_reqs = omap2_uart2_sdma_reqs, .main_clk = "uart2_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = { .mpu_irqs = omap2_uart3_mpu_irqs, .sdma_reqs = omap2_uart3_sdma_reqs, .main_clk = "uart3_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 28bbd56346a9..eb2f3b93b51c 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = { .clkdm_name = "cpsw_125mhz_clkdm", .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY), .main_clk = "cpsw_125mhz_gclk", + .mpu_rt_idx = 1, .prcm = { .omap4 = { .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET, @@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = { .name = "uart1", .class = &uart_class, .clkdm_name = "l4_wkup_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .main_clk = "dpll_per_m2_div4_wkupdm_ck", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index f7a3df2fb579..0c3a427da544 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = { .mpu_irqs = omap2_uart1_mpu_irqs, .sdma_reqs = omap2_uart1_sdma_reqs, .main_clk = "uart1_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = { .mpu_irqs = omap2_uart2_mpu_irqs, .sdma_reqs = omap2_uart2_sdma_reqs, .main_clk = "uart2_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = CORE_MOD, @@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = { .mpu_irqs = omap2_uart3_mpu_irqs, .sdma_reqs = omap2_uart3_sdma_reqs, .main_clk = "uart3_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS | + HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, @@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = { .mpu_irqs = uart4_mpu_irqs, .sdma_reqs = uart4_sdma_reqs, .main_clk = "uart4_fck", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .prcm = { .omap2 = { .module_offs = OMAP3430_PER_MOD, diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index d04b5e60fdbe..9c3b504477d7 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ 
b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = { .name = "uart3", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET | - HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .main_clk = "func_48m_fclk", .prcm = { .omap4 = { @@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = { .name = "uart4", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT, .main_clk = "func_48m_fclk", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index f37ae96b70a1..3c70f5c1860f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = { .name = "uart3", .class = &omap54xx_uart_hwmod_class, .clkdm_name = "l4per_clkdm", - .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, + .flags = DEBUG_OMAP4UART3_FLAGS, .main_clk = "func_48m_fclk", .prcm = { .omap4 = { @@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = { .name = "uart4", .class = &omap54xx_uart_hwmod_class, .clkdm_name = "l4per_clkdm", + .flags = DEBUG_OMAP4UART4_FLAGS, .main_clk = "func_48m_fclk", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 3a674de6cb63..a388f8c1bcb3 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void) pr_info("%s used as console in debug mode: uart%d clocks will not be gated", uart_name, uart->num); } - - /* - * omap-uart can be used for earlyprintk logs - * So if omap-uart is used as console then prevent - * uart reset and idle to get logs from omap-uart - * until uart console driver is available to take - * care for console messages. - * Idling or resetting omap-uart while printing logs - * early boot logs can stall the boot-up. 
- */ - oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET; } } while (1); diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index e115f6742107..c5be60d85e4b 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -1162,9 +1162,6 @@ static void __init eva_init(void) gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */ gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */ - /* Touchscreen */ - gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */ - /* GETHER */ gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */ diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index d5554646916c..3354a85c90f7 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c @@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = { "usb1", "usb1"), /* SDHI0 */ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", - "sdhi0", "sdhi0"), + "sdhi0_data4", "sdhi0"), + PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", + "sdhi0_ctrl", "sdhi0"), + PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", + "sdhi0_cd", "sdhi0"), + PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", + "sdhi0_wp", "sdhi0"), }; #define FPGA 0x18200000 diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index d73e21d3ea8a..8d6bd5c5efb9 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c @@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = { #define GPIO_KEY(c, g, d, ...) \ { .code = c, .gpio = g, .desc = d, .active_low = 1 } -static __initdata struct gpio_keys_button gpio_buttons[] = { +static struct gpio_keys_button gpio_buttons[] = { GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"), GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"), GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"), diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S index 78ebc7559f53..4c09bae86edf 100644 --- a/arch/arm/mach-sti/headsmp.S +++ b/arch/arm/mach-sti/headsmp.S @@ -16,8 +16,6 @@ #include <linux/linkage.h> #include <linux/init.h> - __INIT - /* * ST specific entry point for secondary CPUs. 
This provides * a "holding pen" into which all secondary cores are held until we're diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 46b3beb4b773..717031a762c2 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -35,6 +35,7 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; struct vm_area_struct *vma; + unsigned long start, end; unsigned long range_start; unsigned long range_end; unsigned int nr; @@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->vma = NULL; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c index f91431963452..7de083d19b7e 100644 --- a/arch/avr32/boards/atngw100/mrmt.c +++ b/arch/avr32/boards/atngw100/mrmt.c @@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = { static struct platform_device rmt_ts_device = { .name = "ucb1400_ts", .id = -1, - } }; #endif diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 33a97929d055..77d442ab28c8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -158,6 +158,7 @@ source "kernel/Kconfig.hz" endmenu source "init/Kconfig" +source "kernel/Kconfig.freezer" source "drivers/Kconfig" source "fs/Kconfig" diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index ef3a9de01954..bc5efc7c3f3f 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -22,7 +22,7 @@ * unmapping a portion of the virtual address space, these hooks are called according to * the following template: * - * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM + * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM * { * for each vma that needs a shootdown do { * tlb_start_vma(tlb, vma); @@ -58,6 +58,7 @@ struct mmu_gather { unsigned int max; unsigned char fullmm; /* non-zero means full mm flush */ unsigned char need_flush; /* really unmapped some PTEs? 
*/ + unsigned long start, end; unsigned long start_addr; unsigned long end_addr; struct page **pages; @@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; tlb->max = ARRAY_SIZE(tlb->local); tlb->pages = tlb->local; tlb->nr = 0; - tlb->fullmm = full_mm_flush; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; tlb->start_addr = ~0UL; } diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff4..4fab52294d98 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -28,7 +28,7 @@ config MICROBLAZE select GENERIC_CLOCKEVENTS select GENERIC_IDLE_POLL_SETUP select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS + select CLONE_BACKWARDS3 config SWAP def_bool n diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 1dc086087a72..fa44f3ec5302 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -17,6 +17,8 @@ #define current_cpu_type() current_cpu_data.cputype #endif +#define boot_cpu_type() cpu_data[0].cputype + /* * SMP assumption: Options of CPU 0 are a superset of all processors. * This is true for all known MIPS systems. diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 159abc8842d2..126da74d4c55 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void) int i, cpu = 1, boot_cpu = 0; #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + int cpu_hw_intr; + /* arbitration priority */ clear_c0_brcm_cmt_ctrl(0x30); @@ -80,8 +82,12 @@ static void __init bmips_smp_setup(void) * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output */ - change_c0_brcm_cmt_intr(0xf8018000, - (0x02 << 27) | (0x03 << 15)); + if (boot_cpu == 0) + cpu_hw_intr = 0x02; + else + cpu_hw_intr = 0x1d; + + change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15)); /* single core, 2 threads (2 pipelines) */ max_cpus = 2; diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index e4b1140cdae0..3a2b6e9f25cf 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr) reg.control[i] |= M_PERFCTL_USER; if (ctr[i].exl) reg.control[i] |= M_PERFCTL_EXL; - if (current_cpu_type() == CPU_XLR) + if (boot_cpu_type() == CPU_XLR) reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; reg.counter[i] = 0x80000000 - ctr[i].count; } diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c index d22dc0d6f289..2b7e837dc2e2 100644 --- a/arch/mips/pnx833x/common/platform.c +++ b/arch/mips/pnx833x/common/platform.c @@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = { .end = PNX8335_IP3902_PORTS_END, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_SOC_PNX8335 [1] = { .start = PNX8335_PIC_ETHERNET_INT, .end = PNX8335_PIC_ETHERNET_INT, .flags = IORESOURCE_IRQ, }, +#endif }; static struct platform_device pnx833x_ethernet_device = { diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 99dbab1c59ac..d60bf98fa5cf 100644 --- a/arch/openrisc/Kconfig +++ 
b/arch/openrisc/Kconfig @@ -55,6 +55,7 @@ config GENERIC_CSUM source "init/Kconfig" +source "kernel/Kconfig.freezer" menu "Processor type and features" diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3bf72cd2c8fc..dbd9d3c991e8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -566,7 +566,7 @@ config SCHED_SMT config PPC_DENORMALISATION bool "PowerPC denormalisation exception handling" depends on PPC_BOOK3S_64 - default "n" + default "y" if PPC_POWERNV ---help--- Add support for handling denormalisation of single precision values. Useful for bare metal only. If unsure say Y here. diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 47a35b08b963..e378cccfca55 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -247,6 +247,10 @@ struct thread_struct { unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ struct pt_regs ckpt_regs; /* Checkpointed registers */ + unsigned long tm_tar; + unsigned long tm_ppr; + unsigned long tm_dscr; + /* * Transactional FP and VSX 0-31 register set. * NOTE: the sense of these is the opposite of the integer ckpt_regs! diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a6840e4e24f7..99222e27f173 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -254,19 +254,28 @@ #define SPRN_HRMOR 0x139 /* Real mode offset register */ #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ +/* HFSCR and FSCR bit numbers are the same */ +#define FSCR_TAR_LG 8 /* Enable Target Address Register */ +#define FSCR_EBB_LG 7 /* Enable Event Based Branching */ +#define FSCR_TM_LG 5 /* Enable Transactional Memory */ +#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */ +#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/ +#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */ +#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */ +#define FSCR_FP_LG 0 /* Enable Floating Point */ #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ -#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ -#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ -#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ +#define FSCR_TAR __MASK(FSCR_TAR_LG) +#define FSCR_EBB __MASK(FSCR_EBB_LG) +#define FSCR_DSCR __MASK(FSCR_DSCR_LG) #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ -#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ -#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ -#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */ -#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */ -#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/ -#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ -#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */ -#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */ +#define HFSCR_TAR __MASK(FSCR_TAR_LG) +#define HFSCR_EBB __MASK(FSCR_EBB_LG) +#define HFSCR_TM __MASK(FSCR_TM_LG) +#define HFSCR_PM __MASK(FSCR_PM_LG) +#define HFSCR_BHRB __MASK(FSCR_BHRB_LG) +#define HFSCR_DSCR __MASK(FSCR_DSCR_LG) +#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG) +#define HFSCR_FP __MASK(FSCR_FP_LG) #define SPRN_TAR 0x32f /* Target Address Register */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ #define LPCR_VPM0 (1ul << (63-0)) 
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 49a13e0ef234..294c2cedcf7a 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); +#ifdef CONFIG_PPC_BOOK3S_64 +static inline void save_tar(struct thread_struct *prev) +{ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + prev->tar = mfspr(SPRN_TAR); +} +#else +static inline void save_tar(struct thread_struct *prev) {} +#endif extern void giveup_fpu(struct task_struct *); extern void load_up_fpu(void); diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index c7e8afc2ead0..8207459efe56 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -138,6 +138,9 @@ int main(void) DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); + DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar)); + DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); + DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, transact_vr[0])); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index ea9414c8088d..55593ee2d5aa 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = { static int __init eeh_init_proc(void) { - if (machine_is(pseries)) + if (machine_is(pseries) || machine_is(powernv)) proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); return 0; } diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index ab15b8d057ad..2bd0b885b0fe 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) #ifdef CONFIG_PPC_BOOK3S_64 BEGIN_FTR_SECTION - /* - * Back up the TAR across context switches. Note that the TAR is not - * available for use in the kernel. (To provide this, the TAR should - * be backed up/restored on exception entry/exit instead, and be in - * pt_regs. FIXME, this should be in pt_regs anyway (for debug).) 
- */ - mfspr r0,SPRN_TAR - std r0,THREAD_TAR(r3) - /* Event based branch registers */ mfspr r0, SPRN_BESCR std r0, THREAD_BESCR(r3) @@ -584,9 +575,34 @@ BEGIN_FTR_SECTION ld r7,DSCR_DEFAULT@toc(2) ld r0,THREAD_DSCR(r4) cmpwi r6,0 + li r8, FSCR_DSCR bne 1f ld r0,0(r7) -1: cmpd r0,r25 + b 3f +1: + BEGIN_FTR_SECTION_NESTED(70) + mfspr r6, SPRN_FSCR + or r6, r6, r8 + mtspr SPRN_FSCR, r6 + BEGIN_FTR_SECTION_NESTED(69) + mfspr r6, SPRN_HFSCR + or r6, r6, r8 + mtspr SPRN_HFSCR, r6 + END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69) + b 4f + END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) +3: + BEGIN_FTR_SECTION_NESTED(70) + mfspr r6, SPRN_FSCR + andc r6, r6, r8 + mtspr SPRN_FSCR, r6 + BEGIN_FTR_SECTION_NESTED(69) + mfspr r6, SPRN_HFSCR + andc r6, r6, r8 + mtspr SPRN_HFSCR, r6 + END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69) + END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) +4: cmpd r0,r25 beq 2f mtspr SPRN_DSCR,r0 2: diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 4e00d223b2e3..902ca3c6b4b6 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline: . = 0x4f80 SET_SCRATCH0(r13) EXCEPTION_PROLOG_0(PACA_EXGEN) - b facility_unavailable_relon_hv + b hv_facility_unavailable_relon_hv STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) #ifdef CONFIG_PPC_DENORMALISATION @@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) b .ret_from_except STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) .align 7 .globl __end_handlers @@ -1188,7 +1189,7 @@ __end_handlers: STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) - STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) + STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable) #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index c517dbe705fd..8083be20fe5e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev, struct ppc64_tlb_batch *batch; #endif + /* Back up the TAR across context switches. + * Note that the TAR is not available for use in the kernel. (To + * provide this, the TAR should be backed up/restored on exception + * entry/exit instead, and be in pt_regs. FIXME, this should be in + * pt_regs anyway (for debug).) + * Save the TAR here before we do treclaim/trecheckpoint as these + * will change the TAR. + */ + save_tar(&prev->thread); + __switch_to_tm(prev); #ifdef CONFIG_SMP diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 51be8fb24803..0554d1f6d70d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -233,6 +233,16 @@ dont_backup_fp: std r5, _CCR(r7) std r6, _XER(r7) + + /* ******************** TAR, PPR, DSCR ********** */ + mfspr r3, SPRN_TAR + mfspr r4, SPRN_PPR + mfspr r5, SPRN_DSCR + + std r3, THREAD_TM_TAR(r12) + std r4, THREAD_TM_PPR(r12) + std r5, THREAD_TM_DSCR(r12) + /* MSR and flags: We don't change CRs, and we don't need to alter * MSR. */ @@ -347,6 +357,16 @@ dont_restore_fp: mtmsr r6 /* FP/Vec off again! 
*/ restore_gprs: + + /* ******************** TAR, PPR, DSCR ********** */ + ld r4, THREAD_TM_TAR(r3) + ld r5, THREAD_TM_PPR(r3) + ld r6, THREAD_TM_DSCR(r3) + + mtspr SPRN_TAR, r4 + mtspr SPRN_PPR, r5 + mtspr SPRN_DSCR, r6 + /* ******************** CR,LR,CCR,MSR ********** */ ld r3, _CTR(r7) ld r4, _LINK(r7) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bf33c22e38a4..e435bc089ea3 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -44,9 +44,7 @@ #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/pmc.h> -#ifdef CONFIG_PPC32 #include <asm/reg.h> -#endif #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif @@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs) die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); } +#ifdef CONFIG_PPC64 void facility_unavailable_exception(struct pt_regs *regs) { static char *facility_strings[] = { - "FPU", - "VMX/VSX", - "DSCR", - "PMU SPRs", - "BHRB", - "TM", - "AT", - "EBB", - "TAR", + [FSCR_FP_LG] = "FPU", + [FSCR_VECVSX_LG] = "VMX/VSX", + [FSCR_DSCR_LG] = "DSCR", + [FSCR_PM_LG] = "PMU SPRs", + [FSCR_BHRB_LG] = "BHRB", + [FSCR_TM_LG] = "TM", + [FSCR_EBB_LG] = "EBB", + [FSCR_TAR_LG] = "TAR", }; - char *facility, *prefix; + char *facility = "unknown"; u64 value; + u8 status; + bool hv; - if (regs->trap == 0xf60) { - value = mfspr(SPRN_FSCR); - prefix = ""; - } else { + hv = (regs->trap == 0xf80); + if (hv) value = mfspr(SPRN_HFSCR); - prefix = "Hypervisor "; + else + value = mfspr(SPRN_FSCR); + + status = value >> 56; + if (status == FSCR_DSCR_LG) { + /* User is acessing the DSCR. Set the inherit bit and allow + * the user to set it directly in future by setting via the + * H/FSCR DSCR bit. + */ + current->thread.dscr_inherit = 1; + if (hv) + mtspr(SPRN_HFSCR, value | HFSCR_DSCR); + else + mtspr(SPRN_FSCR, value | FSCR_DSCR); + return; } - value = value >> 56; + if ((status < ARRAY_SIZE(facility_strings)) && + facility_strings[status]) + facility = facility_strings[status]; /* We restore the interrupt state now */ if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - if (value < ARRAY_SIZE(facility_strings)) - facility = facility_strings[value]; - else - facility = "unknown"; - pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", - prefix, facility, regs->nip, regs->msr); + hv ? 
"Hypervisor " : "", facility, regs->nip, regs->msr); if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); @@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs) die("Unexpected facility unavailable exception", regs, SIGABRT); } +#endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2efa9dde741a..7629cd3eb91a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) rma_size <<= PAGE_SHIFT; rmls = lpcr_rmls(rma_size); err = -EINVAL; - if (rmls < 0) { + if ((long)rmls < 0) { pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); goto out_srcu; } @@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) /* Allocate the guest's logical partition ID */ lpid = kvmppc_alloc_lpid(); - if (lpid < 0) + if ((long)lpid < 0) return -ENOMEM; kvm->arch.lpid = lpid; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 19498a567a81..c6e13d9a9e15 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) if (err) goto free_shadow_vcpu; + err = -ENOMEM; p = __get_free_page(GFP_KERNEL|__GFP_ZERO); - /* the real shared page fills the last 4k of our page */ - vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096); if (!p) goto uninit_vcpu; + /* the real shared page fills the last 4k of our page */ + vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); #ifdef CONFIG_PPC_BOOK3S_64 /* default to book3s_64 (970fx) */ diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 9f8671a44551..6a5f2b1f32ca 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -569,35 +569,6 @@ error: return ret; } -static int unzip_oops(char *oops_buf, char *big_buf) -{ - struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; - u64 timestamp = oops_hdr->timestamp; - char *big_oops_data = NULL; - char *oops_data_buf = NULL; - size_t big_oops_data_sz; - int unzipped_len; - - big_oops_data = big_buf + sizeof(struct oops_log_info); - big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info); - oops_data_buf = oops_buf + sizeof(struct oops_log_info); - - unzipped_len = nvram_decompress(oops_data_buf, big_oops_data, - oops_hdr->report_length, - big_oops_data_sz); - - if (unzipped_len < 0) { - pr_err("nvram: decompression failed; returned %d\n", - unzipped_len); - return -1; - } - oops_hdr = (struct oops_log_info *)big_buf; - oops_hdr->version = OOPS_HDR_VERSION; - oops_hdr->report_length = (u16) unzipped_len; - oops_hdr->timestamp = timestamp; - return 0; -} - static int nvram_pstore_open(struct pstore_info *psi) { /* Reset the iterator to start reading partitions again */ @@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, unsigned int err_type, id_no, size = 0; struct nvram_os_partition *part = NULL; char *buff = NULL, *big_buff = NULL; - int rc, sig = 0; + int sig = 0; loff_t p; -read_partition: read_type++; switch (nvram_type_ids[read_type]) { @@ -749,30 +719,46 @@ read_partition: *id = id_no; if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { + int length, unzipped_len; + size_t hdr_size; + oops_hdr = (struct oops_log_info *)buff; - *buf = buff + sizeof(*oops_hdr); + if (oops_hdr->version < OOPS_HDR_VERSION) { + /* Old format oops header had 
2-byte record size */ + hdr_size = sizeof(u16); + length = oops_hdr->version; + time->tv_sec = 0; + time->tv_nsec = 0; + } else { + hdr_size = sizeof(*oops_hdr); + length = oops_hdr->report_length; + time->tv_sec = oops_hdr->timestamp; + time->tv_nsec = 0; + } + *buf = kmalloc(length, GFP_KERNEL); + if (*buf == NULL) + return -ENOMEM; + memcpy(*buf, buff + hdr_size, length); + kfree(buff); if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); if (!big_buff) return -ENOMEM; - rc = unzip_oops(buff, big_buff); + unzipped_len = nvram_decompress(*buf, big_buff, + length, big_oops_buf_sz); - if (rc != 0) { - kfree(buff); + if (unzipped_len < 0) { + pr_err("nvram: decompression failed, returned " + "rc %d\n", unzipped_len); kfree(big_buff); - goto read_partition; + } else { + *buf = big_buff; + length = unzipped_len; } - - oops_hdr = (struct oops_log_info *)big_buff; - *buf = big_buff + sizeof(*oops_hdr); - kfree(buff); } - - time->tv_sec = oops_hdr->timestamp; - time->tv_nsec = 0; - return oops_hdr->report_length; + return length; } *buf = buff; @@ -816,6 +802,7 @@ static int nvram_pstore_init(void) static void __init nvram_init_oops_partition(int rtas_partition_exists) { int rc; + size_t size; rc = pseries_nvram_init_os_partition(&oops_log_partition); if (rc != 0) { @@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) big_oops_buf_sz = (oops_data_sz * 100) / 45; big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); if (big_oops_buf) { - stream.workspace = kmalloc(zlib_deflate_workspacesize( - WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); + size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL), + zlib_inflate_workspacesize()); + stream.workspace = kmalloc(size, GFP_KERNEL); if (!stream.workspace) { pr_err("nvram: No memory for compression workspace; " "skipping compression of %s partition data\n", diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 22f75b504f7f..8a4cae78f03c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -118,6 +118,7 @@ config S390 select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZ4 select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO select HAVE_KERNEL_XZ @@ -227,11 +228,12 @@ config MARCH_Z196 not work on older machines. config MARCH_ZEC12 - bool "IBM zEC12" + bool "IBM zBC12 and zEC12" select HAVE_MARCH_ZEC12_FEATURES if 64BIT help - Select this to enable optimizations for IBM zEC12 (2827 series). The - kernel will be slightly faster but will not work on older machines. + Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and + 2827 series). The kernel will be slightly faster but will not work on + older machines. 
endchoice @@ -709,6 +711,7 @@ config S390_GUEST def_bool y prompt "s390 support for virtio devices" depends on 64BIT + select TTY select VIRTUALIZATION select VIRTIO select VIRTIO_CONSOLE diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 3ad8f61c9985..866ecbe670e4 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -6,9 +6,9 @@ BITS := $(if $(CONFIG_64BIT),64,31) -targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ - vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \ - sizes.h head$(BITS).o +targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 +targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 +targets += misc.o piggy.o sizes.h head$(BITS).o KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING @@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_LZ4) := lz4 suffix-$(CONFIG_KERNEL_LZMA) := lzma suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_XZ) := xz @@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) $(call if_changed,gzip) $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) + $(call if_changed,lz4) $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) $(call if_changed,lzma) $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index c4c6a1cf221b..57cbaff1f397 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr; #include "../../../../lib/decompress_bunzip2.c" #endif +#ifdef CONFIG_KERNEL_LZ4 +#include "../../../../lib/decompress_unlz4.c" +#endif + #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 4d8604e311f3..7d4676758733 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr, size -= offset; p = addr + offset / BITS_PER_LONG; if (bit) { - set = __flo_word(0, *p & (~0UL << bit)); + set = __flo_word(0, *p & (~0UL >> bit)); if (set >= size) return size + offset; if (set < BITS_PER_LONG) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..23a64d25f2b1 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -32,6 +32,7 @@ struct mmu_gather { struct mm_struct *mm; struct mmu_table_batch *batch; unsigned int fullmm; + unsigned long start, unsigned long end; }; struct mmu_table_batch { @@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned int full_mm_flush) + unsigned long start, + unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); tlb->batch = NULL; if (tlb->fullmm) __tlb_flush_mm(mm); diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index a6fc037671b1..500aa1029bcb 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) 
static bool is_in_guest(struct pt_regs *regs) { - unsigned long ip = instruction_pointer(regs); - if (user_mode(regs)) return false; - - return ip == (unsigned long) &sie_exit; +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + return instruction_pointer(regs) == (unsigned long) &sie_exit; +#else + return false; +#endif } static unsigned long guest_is_user_mode(struct pt_regs *regs) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 497451ec5e26..aeed8a61fa0d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -994,6 +994,7 @@ static void __init setup_hwcaps(void) strcpy(elf_platform, "z196"); break; case 0x2827: + case 0x2828: strcpy(elf_platform, "zEC12"); break; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba694d2ba51e..34c1c9a90be2 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) return rc; vcpu->arch.sie_block->icptcode = 0; - preempt_disable(); - kvm_guest_enter(); - preempt_enable(); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); trace_kvm_s390_sie_enter(vcpu, atomic_read(&vcpu->arch.sie_block->cpuflags)); + + /* + * As PF_VCPU will be used in fault handler, between guest_enter + * and guest_exit should be no uaccess. + */ + preempt_disable(); + kvm_guest_enter(); + preempt_enable(); rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); + kvm_guest_exit(); + + VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", + vcpu->arch.sie_block->icptcode); + trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); + if (rc > 0) rc = 0; if (rc < 0) { @@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } } - VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", - vcpu->arch.sie_block->icptcode); - trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); - kvm_guest_exit(); memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); return rc; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 0da3e6eb6be6..4cdc54e63ebc 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -16,6 +16,7 @@ #include <linux/errno.h> #include <linux/compat.h> #include <asm/asm-offsets.h> +#include <asm/facility.h> #include <asm/current.h> #include <asm/debug.h> #include <asm/ebcdic.h> @@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* Only provide non-quiescing support if the host supports it */ - if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && - S390_lowcore.stfl_fac_list & 0x00020000) + if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* No support for conditional-SSKE */ diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ce36ea80e4f9..ad446b0c55b6 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -69,6 +69,7 @@ static void __init setup_zero_pages(void) order = 2; break; case 0x2827: /* zEC12 */ + case 0x2828: /* zEC12 */ default: order = 5; break; diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index ffeb17ce7f31..930783d2c99b 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) switch (id.machine) { case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; 
break; - case 0x2827: ops->cpu_type = "s390/zEC12"; break; + case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break; default: return -ENODEV; } } diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c8def8bc9020..5fc237581caf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT source "init/Kconfig" +source "kernel/Kconfig.freezer" + config MMU def_bool y diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index e61d43d9f689..362192ed12fe 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); init_tlb_gather(tlb); } diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 4febacd1a8a1..29b0301c18aa 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) } static inline void -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = full_mm_flush; + tlb->start = start; + tlb->end = end; + tlb->fullmm = !(start | (end+1)); init_tlb_gather(tlb); } diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index d606463aa6d6..b7388a425f09 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr) unsigned long nr_pages; nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; - efi_call_phys2(sys_table->boottime->free_pages, addr, size); + efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages); } static void find_bits(unsigned long mask, u8 *pos, u8 *size) diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..3bf2dd0cf61f 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #endif +#ifdef CONFIG_MEM_SOFT_DIRTY + +/* + * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and + * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset + * into this range. 
+ */ +#define PTE_FILE_MAX_BITS 28 +#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) +#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) +#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) +#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) +#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) +#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) +#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) + +#define pte_to_pgoff(pte) \ + ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ + & ((1U << PTE_FILE_BITS1) - 1))) \ + + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ + & ((1U << PTE_FILE_BITS2) - 1)) \ + << (PTE_FILE_BITS1)) \ + + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ + & ((1U << PTE_FILE_BITS3) - 1)) \ + << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ + + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ + << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) + +#define pgoff_to_pte(off) \ + ((pte_t) { .pte_low = \ + ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ + + ((((off) >> PTE_FILE_BITS1) \ + & ((1U << PTE_FILE_BITS2) - 1)) \ + << PTE_FILE_SHIFT2) \ + + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ + & ((1U << PTE_FILE_BITS3) - 1)) \ + << PTE_FILE_SHIFT3) \ + + ((((off) >> \ + (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ + << PTE_FILE_SHIFT4) \ + + _PAGE_FILE }) + +#else /* CONFIG_MEM_SOFT_DIRTY */ + /* * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, - * split up the 29 bits of offset into this range: + * split up the 29 bits of offset into this range. */ #define PTE_FILE_MAX_BITS 29 #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) @@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) << PTE_FILE_SHIFT3) \ + _PAGE_FILE }) +#endif /* CONFIG_MEM_SOFT_DIRTY */ + /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..81bb91b49a88 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. + * + * For soft-dirty tracking 11 bit is taken from + * the low part of pte as well. */ #define pte_to_pgoff(pte) ((pte).pte_high) #define pgoff_to_pte(off) \ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 7dc305a46058..1c00631164c2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); } +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); +} + +static inline int pte_swp_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); +} + +static inline pte_t pte_file_clear_soft_dirty(pte_t pte) +{ + return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); +} + +static inline pte_t pte_file_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, _PAGE_SOFT_DIRTY); +} + +static inline int pte_file_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & _PAGE_SOFT_DIRTY; +} + /* * Mask out unsupported bits in a present pgprot. 
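/*
 * Editor's note (illustrative sketch, assuming the usual x86 bit numbers:
 * _PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8 and
 * _PAGE_BIT_SOFT_DIRTY = _PAGE_BIT_HIDDEN = 11): the pte_to_pgoff() /
 * pgoff_to_pte() macros above scatter a 28-bit file offset into the bit
 * fields left free between those reserved pte bits.  A standalone
 * round-trip check of that packing scheme:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT1	1	/* above _PAGE_BIT_PRESENT */
#define SHIFT2	7	/* above _PAGE_BIT_FILE */
#define SHIFT3	9	/* above _PAGE_BIT_PROTNONE */
#define SHIFT4	12	/* above _PAGE_BIT_SOFT_DIRTY */
#define BITS1	(SHIFT2 - SHIFT1 - 1)	/* 5 bits */
#define BITS2	(SHIFT3 - SHIFT2 - 1)	/* 1 bit  */
#define BITS3	(SHIFT4 - SHIFT3 - 1)	/* 2 bits */

static uint32_t pgoff_to_pte_low(uint32_t off)
{
	return ((off & ((1u << BITS1) - 1)) << SHIFT1) |
	       (((off >> BITS1) & ((1u << BITS2) - 1)) << SHIFT2) |
	       (((off >> (BITS1 + BITS2)) & ((1u << BITS3) - 1)) << SHIFT3) |
	       ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

static uint32_t pte_low_to_pgoff(uint32_t pte)
{
	return ((pte >> SHIFT1) & ((1u << BITS1) - 1)) |
	       (((pte >> SHIFT2) & ((1u << BITS2) - 1)) << BITS1) |
	       (((pte >> SHIFT3) & ((1u << BITS3) - 1)) << (BITS1 + BITS2)) |
	       ((pte >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
	uint32_t reserved = (1u << 0) | (1u << 6) | (1u << 8) | (1u << 11);
	uint32_t off;

	for (off = 0; off < (1u << 28); off += 0x10001) {
		uint32_t pte = pgoff_to_pte_low(off);

		assert((pte & reserved) == 0);		/* reserved bits untouched */
		assert(pte_low_to_pgoff(pte) == off);	/* lossless round trip */
	}
	printf("28-bit offsets survive the pte packing\n");
	return 0;
}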
Non-present pgprots * can use those bits for other purposes, so leave them be. diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index c98ac63aae48..f4843e031131 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -61,12 +61,27 @@ * they do not conflict with each other. */ +#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN + #ifdef CONFIG_MEM_SOFT_DIRTY -#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) +#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) #endif +/* + * Tracking soft dirty bit when a page goes to a swap is tricky. + * We need a bit which can be stored in pte _and_ not conflict + * with swap entry format. On x86 bits 6 and 7 are *not* involved + * into swap entry computation, but bit 6 is used for nonlinear + * file mapping, so we borrow bit 7 for soft dirty tracking. + */ +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE +#else +#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) +#endif + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) #else diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab5..e3ddd7db723f 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() -/* The {read|write|spin}_lock() on x86 are full memory barriers. */ -static inline void smp_mb__after_lock(void) { } -#define ARCH_HAS_SMP_MB_AFTER_LOCK - #endif /* _ASM_X86_SPINLOCK_H */ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index fbc9210b45bc..a45d8d4ace10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void) case 70: case 71: case 63: + case 69: x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index cad791dbde95..1fb6c72717bd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { static struct uncore_event_desc snbep_uncore_qpi_events[] = { INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), - INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), + INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), + INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), { /* end: all zeroes */ }, }; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 94ab6b90dd3f..63bdb29b2549 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func) static void __init intel_remapping_check(int num, int slot, int func) { u8 revision; + u16 device; + device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); 
/* - * Revision 0x13 of this chipset supports irq remapping - * but has an erratum that breaks its behavior, flag it as such + * Revision 13 of all triggering devices id in this quirk have + * a problem draining interrupts when irq remapping is enabled, + * and should be flagged as broken. Additionally revisions 0x12 + * and 0x22 of device id 0x3405 has this problem. */ if (revision == 0x13) set_irq_remapping_broken(); + else if ((device == 0x3405) && + ((revision == 0x12) || + (revision == 0x22))) + set_irq_remapping_broken(); } @@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, + { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, {} diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 202d24f0f7e7..5d576ab34403 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void) if (cpu_has_fxsr) { memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); - asm volatile("fxsave %0" : : "m" (fx_scratch)); + asm volatile("fxsave %0" : "+m" (fx_scratch)); mask = fx_scratch.mxcsr_mask; if (mask == 0) mask = 0x0000ffbf; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 47ebb1dbfbcb..7a0adb7ee433 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -220,12 +220,13 @@ int apply_microcode_amd(int cpu) return 0; } - if (__apply_microcode_amd(mc_amd)) + if (__apply_microcode_amd(mc_amd)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); - else - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, - mc_amd->hdr.patch_id); + return -1; + } + pr_info("CPU%d: new patch_level=0x%08x\n", cpu, + mc_amd->hdr.patch_id); uci->cpu_sig.rev = mc_amd->hdr.patch_id; c->microcode = mc_amd->hdr.patch_id; diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..48f8375e4c6b 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = TASK_UNMAPPED_BASE; + *begin = mmap_legacy_base(); *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..f63778cb2363 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -98,7 +98,7 @@ static unsigned long mmap_base(void) * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ -static unsigned long mmap_legacy_base(void) +unsigned long mmap_legacy_base(void) { if (mmap_is_ia32()) return TASK_UNMAPPED_BASE; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index fd6c51cc3acb..5a74a9c1e42c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device) /* Clean up. */ per_cpu(processor_device_array, pr->id) = NULL; per_cpu(processors, pr->id) = NULL; - try_offline_node(cpu_to_node(pr->id)); /* Remove the CPU. 
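/*
 * Editor's note (illustrative sketch): the intel_remapping_check() hunk
 * above flags IRQ remapping as broken for revision 0x13 of any of the
 * quirked host bridges, and additionally for revisions 0x12 and 0x22 of
 * device id 0x3405.  The decision logic, lifted into a small standalone
 * predicate for clarity:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int remapping_is_broken(uint16_t device, uint8_t revision)
{
	if (revision == 0x13)
		return 1;
	if (device == 0x3405 && (revision == 0x12 || revision == 0x22))
		return 1;
	return 0;
}

int main(void)
{
	assert(remapping_is_broken(0x3403, 0x13));	/* old case, any quirked device */
	assert(remapping_is_broken(0x3405, 0x22));	/* new 0x3405-only case */
	assert(!remapping_is_broken(0x3406, 0x22));	/* other devices unaffected */
	printf("quirk matrix behaves as in the patch\n");
	return 0;
}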
*/ get_online_cpus(); @@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device) acpi_unmap_lsapic(pr->id); put_online_cpus(); + try_offline_node(cpu_to_node(pr->id)); + out: free_cpumask_var(pr->throttling.shared_cpu_map); kfree(pr); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index f68095756fb7..408f6b2a5fa8 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); #define PHYSICAL_NODE_STRING "physical_node" +#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10) int register_acpi_bus_type(struct acpi_bus_type *type) { @@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) return ret; } -static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, - void *addr_p, void **ret_p) +static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used, + void *not_used, void **ret_p) { - unsigned long long addr, sta; - acpi_status status; + struct acpi_device *adev = NULL; - status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); - if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { + acpi_bus_get_device(handle, &adev); + if (adev) { *ret_p = handle; - status = acpi_bus_get_status_handle(handle, &sta); - if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED)) - return AE_CTRL_TERMINATE; + return AE_CTRL_TERMINATE; } return AE_OK; } -acpi_handle acpi_get_child(acpi_handle parent, u64 address) +static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge) { - void *ret = NULL; + unsigned long long sta; + acpi_status status; + + status = acpi_bus_get_status_handle(handle, &sta); + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) + return false; + + if (is_bridge) { + void *test = NULL; + + /* Check if this object has at least one child device. */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_dev_present, NULL, NULL, &test); + return !!test; + } + return true; +} + +struct find_child_context { + u64 addr; + bool is_bridge; + acpi_handle ret; + bool ret_checked; +}; + +static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used, + void *data, void **not_used) +{ + struct find_child_context *context = data; + unsigned long long addr; + acpi_status status; - if (!parent) - return NULL; + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); + if (ACPI_FAILURE(status) || addr != context->addr) + return AE_OK; - acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL, - do_acpi_find_child, &address, &ret); - return (acpi_handle)ret; + if (!context->ret) { + /* This is the first matching object. Save its handle. */ + context->ret = handle; + return AE_OK; + } + /* + * There is more than one matching object with the same _ADR value. + * That really is unexpected, so we are kind of beyond the scope of the + * spec here. We have to choose which one to return, though. + * + * First, check if the previously found object is good enough and return + * its handle if so. Second, check the same for the object that we've + * just found. 
+ */ + if (!context->ret_checked) { + if (acpi_extra_checks_passed(context->ret, context->is_bridge)) + return AE_CTRL_TERMINATE; + else + context->ret_checked = true; + } + if (acpi_extra_checks_passed(handle, context->is_bridge)) { + context->ret = handle; + return AE_CTRL_TERMINATE; + } + return AE_OK; } -EXPORT_SYMBOL(acpi_get_child); + +acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge) +{ + if (parent) { + struct find_child_context context = { + .addr = addr, + .is_bridge = is_bridge, + }; + + acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child, + NULL, &context, NULL); + return context.ret; + } + return NULL; +} +EXPORT_SYMBOL_GPL(acpi_find_child); int acpi_bind_one(struct device *dev, acpi_handle handle) { struct acpi_device *acpi_dev; acpi_status status; struct acpi_device_physical_node *physical_node, *pn; - char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; + char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; + struct list_head *physnode_list; + unsigned int node_id; int retval = -EINVAL; if (ACPI_HANDLE(dev)) { @@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) mutex_lock(&acpi_dev->physical_node_lock); - /* Sanity check. */ - list_for_each_entry(pn, &acpi_dev->physical_node_list, node) + /* + * Keep the list sorted by node_id so that the IDs of removed nodes can + * be recycled easily. + */ + physnode_list = &acpi_dev->physical_node_list; + node_id = 0; + list_for_each_entry(pn, &acpi_dev->physical_node_list, node) { + /* Sanity check. */ if (pn->dev == dev) { dev_warn(dev, "Already associated with ACPI node\n"); goto err_free; } - - /* allocate physical node id according to physical_node_id_bitmap */ - physical_node->node_id = - find_first_zero_bit(acpi_dev->physical_node_id_bitmap, - ACPI_MAX_PHYSICAL_NODE); - if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { - retval = -ENOSPC; - goto err_free; + if (pn->node_id == node_id) { + physnode_list = &pn->node; + node_id++; + } } - set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); + physical_node->node_id = node_id; physical_node->dev = dev; - list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); + list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; mutex_unlock(&acpi_dev->physical_node_lock); @@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev) mutex_lock(&acpi_dev->physical_node_lock); list_for_each_safe(node, next, &acpi_dev->physical_node_list) { - char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; + char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; entry = list_entry(node, struct acpi_device_physical_node, node); @@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev) continue; list_del(node); - clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap); acpi_dev->physical_node_count--; diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index aa1227a7e3f2..04a13784dd20 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) dev->pnp.bus_id, (u32) dev->wakeup.sleep_state); + mutex_lock(&dev->physical_node_lock); + if (!dev->physical_node_count) { seq_printf(seq, "%c%-8s\n", dev->wakeup.flags.run_wake ? 
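/*
 * Editor's note (illustrative sketch): the acpi_bind_one() hunk above drops
 * the physical_node_id_bitmap and instead keeps the node list sorted by
 * node_id, so the first gap found while walking the list is the id to hand
 * out (and ids freed by acpi_unbind_one() are recycled automatically).
 * The same scan over a plain sorted array:
 */
#include <assert.h>
#include <stdio.h>

/* ids[] must be sorted ascending; returns the smallest id not present */
static unsigned int first_free_id(const unsigned int *ids, unsigned int n)
{
	unsigned int id = 0, i;

	for (i = 0; i < n; i++) {
		if (ids[i] == id)
			id++;		/* taken, try the next one */
		else
			break;		/* gap found: reuse this id */
	}
	return id;
}

int main(void)
{
	unsigned int dense[] = { 0, 1, 2 };
	unsigned int gap[]   = { 0, 2, 3 };	/* id 1 was unbound earlier */

	assert(first_free_id(dense, 3) == 3);
	assert(first_free_id(gap, 3) == 1);	/* recycled */
	assert(first_free_id(NULL, 0) == 0);
	printf("sorted-list id allocation recycles gaps\n");
	return 0;
}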
'*' : ' ', @@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) put_device(ldev); } } + + mutex_unlock(&dev->physical_node_lock); } mutex_unlock(&acpi_device_lock); return 0; @@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev) { struct acpi_device_physical_node *entry; + mutex_lock(&adev->physical_node_lock); + list_for_each_entry(entry, &adev->physical_node_list, node) if (entry->dev && device_can_wakeup(entry->dev)) { bool enable = !device_may_wakeup(entry->dev); device_set_wakeup_enable(entry->dev, enable); } + + mutex_unlock(&adev->physical_node_lock); } static ssize_t diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 0ec434d2586d..e1284b8dc6ee 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, * Some systems always report current brightness level as maximum * through _BQC, we need to test another value for them. */ - test_level = current_level == max_level ? br->levels[2] : max_level; + test_level = current_level == max_level ? br->levels[3] : max_level; result = acpi_video_device_lcd_set_level(device, test_level); if (result) diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c index 4ec7c04b3f82..26386f0b89a8 100644 --- a/drivers/ata/pata_imx.c +++ b/drivers/ata/pata_imx.c @@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx_pata_dt_ids); static struct platform_driver pata_imx_driver = { .probe = pata_imx_probe, diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index e69102696533..3455f833e473 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, } } - return regcache_sync_block_raw_flush(map, &data, base, regtmp); + return regcache_sync_block_raw_flush(map, &data, base, regtmp + + map->reg_stride); } int regcache_sync_block(struct regmap *map, void *block, diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 99cb944a002d..4d45dba7fb8f 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio) int i; bio_for_each_segment(bv, bio, i) { - page = bv->bv_page; /* Non-zero page count for non-head members of - * compound pages is no longer allowed by the kernel, - * but this has never been seen here. + * compound pages is no longer allowed by the kernel. 
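/*
 * Editor's note (illustrative sketch, assuming the flush helper treats its
 * last argument as an exclusive end, which is what the regcache fix above
 * implies): passing "regtmp" flushed every register of the run except the
 * last one, so the final call now passes "regtmp + map->reg_stride".  The
 * off-by-one in miniature:
 */
#include <assert.h>
#include <stdio.h>

#define STRIDE 4

/* write back registers in [base, end), end exclusive, one per stride */
static unsigned int flush_range(unsigned int base, unsigned int end)
{
	unsigned int reg, written = 0;

	for (reg = base; reg < end; reg += STRIDE) {
		/* a real implementation would issue a bus write here */
		written++;
	}
	return written;
}

int main(void)
{
	unsigned int base = 0x100, last = 0x10c;	/* 4 dirty registers */

	assert(flush_range(base, last) == 3);		/* old call: last one lost */
	assert(flush_range(base, last + STRIDE) == 4);	/* fixed call */
	printf("exclusive-end flush needs last + stride\n");
	return 0;
}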
*/ - if (unlikely(PageCompound(page))) - if (compound_trans_head(page) != page) { - pr_crit("page tail used for block I/O\n"); - BUG(); - } + page = compound_trans_head(bv->bv_page); atomic_inc(&page->_count); } } @@ -924,10 +918,13 @@ static void bio_pagedec(struct bio *bio) { struct bio_vec *bv; + struct page *page; int i; - bio_for_each_segment(bv, bio, i) - atomic_dec(&bv->bv_page->_count); + bio_for_each_segment(bv, bio, i) { + page = compound_trans_head(bv->bv_page); + atomic_dec(&page->_count); + } } static void diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 1b456fe9b87a..fc45567ad3ac 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, unsigned long flags; spin_lock_irqsave(&portdev->ports_lock, flags); - list_for_each_entry(port, &portdev->ports, list) - if (port->cdev->dev == dev) + list_for_each_entry(port, &portdev->ports, list) { + if (port->cdev->dev == dev) { + kref_get(&port->kref); goto out; + } + } port = NULL; out: spin_unlock_irqrestore(&portdev->ports_lock, flags); @@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, port = filp->private_data; + /* Port is hot-unplugged. */ + if (!port->guest_connected) + return -ENODEV; + if (!port_has_data(port)) { /* * If nothing's connected on the host just return 0 in @@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, if (ret < 0) return ret; } - /* Port got hot-unplugged. */ + /* Port got hot-unplugged while we were waiting above. */ if (!port->guest_connected) return -ENODEV; /* @@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, if (is_rproc_serial(port->out_vq->vdev)) return -EINVAL; + /* + * pipe->nrbufs == 0 means there are no data to transfer, + * so this returns just 0 for no data. 
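/*
 * Editor's note (illustrative sketch): the find_port_by_devt_in_portdev()
 * hunk above takes the port reference inside the same ports_lock section
 * that finds the port, instead of letting the caller do kref_get() later;
 * otherwise the port could be unplugged and freed between the lookup and
 * the get.  The pattern in a reduced, userspace form:
 */
#include <pthread.h>
#include <stdio.h>

struct port {
	int devt;
	int refcount;		/* stands in for struct kref */
	struct port *next;
};

static pthread_mutex_t ports_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *ports;	/* list protected by ports_lock */

static struct port *find_port_get(int devt)
{
	struct port *p;

	pthread_mutex_lock(&ports_lock);
	for (p = ports; p; p = p->next) {
		if (p->devt == devt) {
			p->refcount++;	/* reference taken under the lock */
			break;
		}
	}
	pthread_mutex_unlock(&ports_lock);
	return p;		/* NULL means "already unplugged" */
}

int main(void)
{
	struct port a = { .devt = 42, .refcount = 1, .next = NULL };
	struct port *p;

	ports = &a;
	p = find_port_get(42);
	printf("found=%d refcount=%d missing=%p\n",
	       p != NULL, a.refcount, (void *)find_port_get(7));
	return 0;
}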
+ */ + pipe_lock(pipe); + if (!pipe->nrbufs) { + ret = 0; + goto error_out; + } + ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); if (ret < 0) - return ret; + goto error_out; buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); - if (!buf) - return -ENOMEM; + if (!buf) { + ret = -ENOMEM; + goto error_out; + } sgl.n = 0; sgl.len = 0; @@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, sgl.sg = buf->sg; sg_init_table(sgl.sg, sgl.size); ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); + pipe_unlock(pipe); if (likely(ret > 0)) ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); if (unlikely(ret <= 0)) free_buf(buf, true); return ret; + +error_out: + pipe_unlock(pipe); + return ret; } static unsigned int port_fops_poll(struct file *filp, poll_table *wait) @@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp) struct port *port; int ret; + /* We get the port with a kref here */ port = find_port_by_devt(cdev->dev); + if (!port) { + /* Port was unplugged before we could proceed */ + return -ENXIO; + } filp->private_data = port; - /* Prevent against a port getting hot-unplugged at the same time */ - spin_lock_irq(&port->portdev->ports_lock); - kref_get(&port->kref); - spin_unlock_irq(&port->portdev->ports_lock); - /* * Don't allow opening of console port devices -- that's done * via /dev/hvc @@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref) port = container_of(kref, struct port, kref); - sysfs_remove_group(&port->dev->kobj, &port_attribute_group); - device_destroy(pdrvdata.class, port->dev->devt); - cdev_del(port->cdev); - - kfree(port->name); - - debugfs_remove(port->debugfs_file); - kfree(port); } @@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port) spin_unlock_irq(&port->portdev->ports_lock); if (port->guest_connected) { + /* Let the app know the port is going down. */ + send_sigio_to_port(port); + + /* Do this after sigio is actually sent */ port->guest_connected = false; port->host_connected = false; - wake_up_interruptible(&port->waitqueue); - /* Let the app know the port is going down. 
*/ - send_sigio_to_port(port); + wake_up_interruptible(&port->waitqueue); } if (is_console_port(port)) { @@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port) */ port->portdev = NULL; + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); + device_destroy(pdrvdata.class, port->dev->devt); + cdev_del(port->cdev); + + kfree(port->name); + + debugfs_remove(port->debugfs_file); + /* * Locks around here are not necessary - a port can't be * opened after we removed the port struct from ports_list diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 1bdb882c845b..4e5739773c33 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = { DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), - DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), - DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), + DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, + CLK_GET_RATE_NOCACHE, 0), + DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, + CLK_GET_RATE_NOCACHE, 0), DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), - DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), - DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), + DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, + 4, 3, CLK_GET_RATE_NOCACHE, 0), + DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, + 8, 3, CLK_GET_RATE_NOCACHE, 0), DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), }; @@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(ppmuispmx, "ppmuispmx", "aclk200", 
E4X12_GATE_ISP0, 20, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, - CLK_IGNORE_UNUSED, 0), + CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), }; diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 5c205b60a82a..089d3e30e221 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock); static DEFINE_SPINLOCK(ddrpll_lock); static DEFINE_SPINLOCK(iopll_lock); static DEFINE_SPINLOCK(armclk_lock); +static DEFINE_SPINLOCK(swdtclk_lock); static DEFINE_SPINLOCK(ddrclk_lock); static DEFINE_SPINLOCK(dciclk_lock); static DEFINE_SPINLOCK(gem0clk_lock); @@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np) } clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, - SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); + SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock); /* DDR clocks */ clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, @@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np) CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock); - clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, - SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); + clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, + CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0, + &gem0clk_lock); clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], "gem0_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); @@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np) CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock); - clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 
2, 0, - SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); + clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, + CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0, + &gem1clk_lock); clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], "gem1_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 0ceb2eff5a7e..f97cb3d8c5a2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, return count; } -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, - size_t count) +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, + const char *buf, size_t count) { struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; unsigned int input, j; @@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, if (input > 1) input = 1; - if (input == cs_tuners->ignore_nice) /* nothing to do */ + if (input == cs_tuners->ignore_nice_load) /* nothing to do */ return count; - cs_tuners->ignore_nice = input; + cs_tuners->ignore_nice_load = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall, 0); - if (cs_tuners->ignore_nice) + if (cs_tuners->ignore_nice_load) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } @@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate); show_store_one(cs, sampling_down_factor); show_store_one(cs, up_threshold); show_store_one(cs, down_threshold); -show_store_one(cs, ignore_nice); +show_store_one(cs, ignore_nice_load); show_store_one(cs, freq_step); declare_show_sampling_rate_min(cs); @@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate); gov_sys_pol_attr_rw(sampling_down_factor); gov_sys_pol_attr_rw(up_threshold); gov_sys_pol_attr_rw(down_threshold); -gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(ignore_nice_load); gov_sys_pol_attr_rw(freq_step); gov_sys_pol_attr_ro(sampling_rate_min); @@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { &sampling_down_factor_gov_sys.attr, &up_threshold_gov_sys.attr, &down_threshold_gov_sys.attr, - &ignore_nice_gov_sys.attr, + &ignore_nice_load_gov_sys.attr, &freq_step_gov_sys.attr, NULL }; @@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { &sampling_down_factor_gov_pol.attr, &up_threshold_gov_pol.attr, &down_threshold_gov_pol.attr, - &ignore_nice_gov_pol.attr, + &ignore_nice_load_gov_pol.attr, &freq_step_gov_pol.attr, NULL }; @@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data) tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; - tuners->ignore_nice = 0; + tuners->ignore_nice_load = 0; tuners->freq_step = DEF_FREQUENCY_STEP; dbs_data->tuners = tuners; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 7b839a8db2a7..e59afaa9da23 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) unsigned int j; if (dbs_data->cdata->governor == 
GOV_ONDEMAND) - ignore_nice = od_tuners->ignore_nice; + ignore_nice = od_tuners->ignore_nice_load; else - ignore_nice = cs_tuners->ignore_nice; + ignore_nice = cs_tuners->ignore_nice_load; policy = cdbs->cur_policy; @@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, cs_tuners = dbs_data->tuners; cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = cs_tuners->sampling_rate; - ignore_nice = cs_tuners->ignore_nice; + ignore_nice = cs_tuners->ignore_nice_load; } else { od_tuners = dbs_data->tuners; od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = od_tuners->sampling_rate; - ignore_nice = od_tuners->ignore_nice; + ignore_nice = od_tuners->ignore_nice_load; od_ops = dbs_data->cdata->gov_ops; io_busy = od_tuners->io_is_busy; } diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 6663ec3b3056..d5f12b4b11b8 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s { /* Per policy Governers sysfs tunables */ struct od_dbs_tuners { - unsigned int ignore_nice; + unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; @@ -175,7 +175,7 @@ struct od_dbs_tuners { }; struct cs_dbs_tuners { - unsigned int ignore_nice; + unsigned int ignore_nice_load; unsigned int sampling_rate; unsigned int sampling_down_factor; unsigned int up_threshold; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 93eb5cbcc1f6..c087347d6688 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, return count; } -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, - size_t count) +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, + const char *buf, size_t count) { struct od_dbs_tuners *od_tuners = dbs_data->tuners; unsigned int input; @@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, if (input > 1) input = 1; - if (input == od_tuners->ignore_nice) { /* nothing to do */ + if (input == od_tuners->ignore_nice_load) { /* nothing to do */ return count; } - od_tuners->ignore_nice = input; + od_tuners->ignore_nice_load = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { @@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); - if (od_tuners->ignore_nice) + if (od_tuners->ignore_nice_load) dbs_info->cdbs.prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; @@ -461,7 +461,7 @@ show_store_one(od, sampling_rate); show_store_one(od, io_is_busy); show_store_one(od, up_threshold); show_store_one(od, sampling_down_factor); -show_store_one(od, ignore_nice); +show_store_one(od, ignore_nice_load); show_store_one(od, powersave_bias); declare_show_sampling_rate_min(od); @@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate); gov_sys_pol_attr_rw(io_is_busy); gov_sys_pol_attr_rw(up_threshold); gov_sys_pol_attr_rw(sampling_down_factor); -gov_sys_pol_attr_rw(ignore_nice); +gov_sys_pol_attr_rw(ignore_nice_load); gov_sys_pol_attr_rw(powersave_bias); gov_sys_pol_attr_ro(sampling_rate_min); @@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] 
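/*
 * Editor's note (illustrative sketch, not the kernel's actual macros): the
 * governor tunables above are wired up by token-pasting helpers such as
 * show_store_one() and gov_sys_pol_attr_rw(), so renaming the field from
 * ignore_nice to ignore_nice_load renames the generated functions, the
 * attribute symbols and the visible sysfs file name in one go.  A reduced
 * model of that pattern:
 */
#include <stdio.h>

struct attr {
	const char *name;		/* what sysfs would show */
	int (*show)(void);
};

static int tuner_ignore_nice_load;

/* simplified stand-in for show_store_one()/gov_sys_pol_attr_rw() */
#define DEFINE_TUNABLE(file_name)					\
	static int show_##file_name(void)				\
	{								\
		return tuner_##file_name;				\
	}								\
	static struct attr file_name##_attr = {				\
		.name = #file_name,					\
		.show = show_##file_name,				\
	}

DEFINE_TUNABLE(ignore_nice_load);

int main(void)
{
	printf("sysfs file: %s, value: %d\n",
	       ignore_nice_load_attr.name, ignore_nice_load_attr.show());
	return 0;
}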
= { &sampling_rate_gov_sys.attr, &up_threshold_gov_sys.attr, &sampling_down_factor_gov_sys.attr, - &ignore_nice_gov_sys.attr, + &ignore_nice_load_gov_sys.attr, &powersave_bias_gov_sys.attr, &io_is_busy_gov_sys.attr, NULL @@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { &sampling_rate_gov_pol.attr, &up_threshold_gov_pol.attr, &sampling_down_factor_gov_pol.attr, - &ignore_nice_gov_pol.attr, + &ignore_nice_load_gov_pol.attr, &powersave_bias_gov_pol.attr, &io_is_busy_gov_pol.attr, NULL @@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data) } tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; - tuners->ignore_nice = 0; + tuners->ignore_nice_load = 0; tuners->powersave_bias = default_powersave_bias; tuners->io_is_busy = should_io_be_busy(); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index bb838b985077..9536852c504a 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) clk_put(cpuclk); return -EINVAL; } - ret = clk_set_rate(cpuclk, rate); - if (ret) { - clk_put(cpuclk); - return ret; - } /* clock table init */ for (i = 2; @@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) i++) loongson2_clockmod_table[i].frequency = (rate * i) / 8; + ret = clk_set_rate(cpuclk, rate); + if (ret) { + clk_put(cpuclk); + return ret; + } + policy->cur = loongson2_cpufreq_get(policy->cpu); cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ce3dc3e9688c..0bbdea5059f3 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev, if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Cannot find proper base address\n"); + err = -ENODEV; goto err_disable_pdev; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 593827b3fdd4..fa645d825009 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) /* Assign cookies to all nodes */ while (!list_empty(&last->node)) { desc = list_entry(last->node.next, struct dma_pl330_desc, node); + if (pch->cyclic) { + desc->txd.callback = last->txd.callback; + desc->txd.callback_param = last->txd.callback_param; + } dma_cookie_assign(&desc->txd); @@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( size_t period_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { - struct dma_pl330_desc *desc; + struct dma_pl330_desc *desc = NULL, *first = NULL; struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_dmac *pdmac = pch->dmac; + unsigned int i; dma_addr_t dst; dma_addr_t src; - desc = pl330_get_desc(pch); - if (!desc) { - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", - __func__, __LINE__); + if (len % period_len != 0) return NULL; - } - switch (direction) { - case DMA_MEM_TO_DEV: - desc->rqcfg.src_inc = 1; - desc->rqcfg.dst_inc = 0; - desc->req.rqtype = MEMTODEV; - src = dma_addr; - dst = pch->fifo_addr; - break; - case DMA_DEV_TO_MEM: - desc->rqcfg.src_inc = 0; - desc->rqcfg.dst_inc = 1; - desc->req.rqtype = DEVTOMEM; - src = pch->fifo_addr; - dst = dma_addr; - break; - default: + if (!is_slave_direction(direction)) { dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", __func__, 
__LINE__); return NULL; } - desc->rqcfg.brst_size = pch->burst_sz; - desc->rqcfg.brst_len = 1; + for (i = 0; i < len / period_len; i++) { + desc = pl330_get_desc(pch); + if (!desc) { + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + __func__, __LINE__); - pch->cyclic = true; + if (!first) + return NULL; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + + while (!list_empty(&first->node)) { + desc = list_entry(first->node.next, + struct dma_pl330_desc, node); + list_move_tail(&desc->node, &pdmac->desc_pool); + } + + list_move_tail(&first->node, &pdmac->desc_pool); - fill_px(&desc->px, dst, src, period_len); + spin_unlock_irqrestore(&pdmac->pool_lock, flags); + + return NULL; + } + + switch (direction) { + case DMA_MEM_TO_DEV: + desc->rqcfg.src_inc = 1; + desc->rqcfg.dst_inc = 0; + desc->req.rqtype = MEMTODEV; + src = dma_addr; + dst = pch->fifo_addr; + break; + case DMA_DEV_TO_MEM: + desc->rqcfg.src_inc = 0; + desc->rqcfg.dst_inc = 1; + desc->req.rqtype = DEVTOMEM; + src = pch->fifo_addr; + dst = dma_addr; + break; + default: + break; + } + + desc->rqcfg.brst_size = pch->burst_sz; + desc->rqcfg.brst_len = 1; + fill_px(&desc->px, dst, src, period_len); + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->node); + + dma_addr += period_len; + } + + if (!desc) + return NULL; + + pch->cyclic = true; + desc->txd.flags = flags; return &desc->txd; } diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index b67f45f5c271..5039fbc88254 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c @@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); - return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << - sh_chan->xmit_shift; + return sh_desc->hw.tcr - + (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); } /* Called from error IRQ or NMI */ diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 98d670825a1a..6e8887fe6c1b 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align, astbo->gem.driver_private = NULL; astbo->bo.bdev = &ast->ttm.bdev; + astbo->bo.bdev->dev_mapping = dev->dev_mapping; ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 0047012045c2..69fd8f1ac8df 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, cirrusbo->gem.driver_private = NULL; cirrusbo->bo.bdev = &cirrus->ttm.bdev; + cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8bcce7866d36..f92da0a32f0d 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. 
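/*
 * Editor's note (illustrative sketch): the reworked pl330_prep_dma_cyclic()
 * above builds one descriptor per period instead of a single descriptor for
 * the whole buffer, chains them, and unwinds the partial chain if an
 * allocation fails part-way.  The shape of that loop, with plain malloc()
 * standing in for pl330_get_desc():
 */
#include <stdio.h>
#include <stdlib.h>

struct desc {
	unsigned long src;	/* start of this period */
	unsigned long len;
	struct desc *next;
};

static struct desc *prep_cyclic(unsigned long addr, unsigned long len,
				unsigned long period_len)
{
	struct desc *first = NULL, *last = NULL;
	unsigned long i;

	if (!period_len || len % period_len)
		return NULL;		/* must be a whole number of periods */

	for (i = 0; i < len / period_len; i++) {
		struct desc *d = malloc(sizeof(*d));

		if (!d) {		/* roll back the partial chain */
			while (first) {
				struct desc *tmp = first->next;

				free(first);
				first = tmp;
			}
			return NULL;
		}
		d->src = addr + i * period_len;
		d->len = period_len;
		d->next = NULL;
		if (last)
			last->next = d;
		else
			first = d;
		last = d;
	}
	return first;
}

int main(void)
{
	struct desc *d = prep_cyclic(0x1000, 4096, 1024);
	int n = 0;

	for (; d; d = d->next)
		n++;			/* leaks in this toy, fine for a demo */
	printf("4096/1024 -> %d period descriptors\n", n);
	return 0;
}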
*/ - etime = ktime_sub_ns(etime, delta_ns); + if (delta_ns < 0) + etime = ktime_add_ns(etime, -delta_ns); + else + etime = ktime_sub_ns(etime, delta_ns); *vblank_time = ktime_to_timeval(etime); DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f2326fc60ac9..6f514297c483 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1856,10 +1856,16 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) -/* HDMI/DP bits are gen4+ */ -#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) +/* + * HDMI/DP bits are gen4+ + * + * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. + * Please check the detailed lore in the commit message for for experimental + * evidence. + */ +#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) -#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) +#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) #define PORTD_HOTPLUG_INT_STATUS (3 << 21) #define PORTC_HOTPLUG_INT_STATUS (3 << 19) #define PORTB_HOTPLUG_INT_STATUS (3 << 17) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5fb305840db8..e38b45786653 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8269,9 +8269,11 @@ check_crtc_state(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + enum pipe pipe; if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config) + if (encoder->get_config && + encoder->get_hw_state(encoder, &pipe)) encoder->get_config(encoder, &pipe_config); } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 67e2c1f1c9a8..5950888ae1d0 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -497,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) goto out; } - /* scale to hardware */ - level = level * freq / max; + /* scale to hardware, but be careful to not overflow */ + if (freq < max) + level = level * freq / max; + else + level = freq / max * level; dev_priv->backlight.level = level; if (dev_priv->backlight.device) @@ -515,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; + /* + * Do not disable backlight on the vgaswitcheroo path. When switching + * away from i915, the other client may depend on i915 to handle the + * backlight. This will leave the backlight on unnecessarily when + * another client is not activated. 
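/*
 * Editor's note (illustrative sketch): the intel_panel_set_backlight() hunk
 * above reorders "level * freq / max" when freq >= max because the 32-bit
 * product level * freq can overflow; dividing first trades a little
 * precision for a result that stays in range.  Demonstrated with uint32_t:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t scale_naive(uint32_t level, uint32_t freq, uint32_t max)
{
	return level * freq / max;		/* product may wrap in 32 bits */
}

static uint32_t scale_safe(uint32_t level, uint32_t freq, uint32_t max)
{
	if (freq < max)
		return level * freq / max;
	return freq / max * level;		/* divide first, no overflow */
}

int main(void)
{
	uint32_t level = 200000, freq = 400000, max = 100000;

	/* 200000 * 400000 = 8e10 does not fit in 32 bits */
	printf("naive: %u  safe: %u  expected: %u\n",
	       scale_naive(level, freq, max),
	       scale_safe(level, freq, max),
	       (uint32_t)((uint64_t)level * freq / max));
	return 0;
}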
+ */ + if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { + DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); + return; + } + spin_lock_irqsave(&dev_priv->backlight.lock, flags); dev_priv->backlight.enabled = false; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f895d1508df8..b0e4a0bd1313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) } } else { if (enable_requested) { + unsigned long irqflags; + enum pipe p; + I915_WRITE(HSW_PWR_WELL_DRIVER, 0); + POSTING_READ(HSW_PWR_WELL_DRIVER); DRM_DEBUG_KMS("Requesting to disable the power well\n"); + + /* + * After this, the registers on the pipes that are part + * of the power well will become zero, so we have to + * adjust our counters according to that. + * + * FIXME: Should we do this in general in + * drm_vblank_post_modeset? + */ + spin_lock_irqsave(&dev->vbl_lock, irqflags); + for_each_pipe(p) + if (p != PIPE_A) + dev->last_vblank[p] = 0; + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } } } diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 13878d5de063..d70e4a92773b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, mgabo->gem.driver_private = NULL; mgabo->bo.bdev = &mdev->ttm.bdev; + mgabo->bo.bdev->dev_mapping = dev->dev_mapping; mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e08820..9953e1fbc46d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e2090..8928bd109c16 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, if (rdev->wb.enabled) { rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); } else { + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); rptr = RREG32(CP_HQD_PQ_RPTR); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); } rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; @@ 
-2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, if (rdev->wb.enabled) { wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); } else { + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); wptr = RREG32(CP_HQD_PQ_WPTR); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); } wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; @@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_CPF_DEBUG, tmp); /* init the pipes */ + mutex_lock(&rdev->srbm_mutex); for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { int me = (i < 4) ? 1 : 2; int pipe = (i < 4) ? i : (i - 4); @@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_HPD_EOP_CONTROL, tmp); } cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); /* init the queues. Just two for now. */ for (i = 0; i < 2; i++) { @@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) mqd->static_thread_mgmt23[0] = 0xffffffff; mqd->static_thread_mgmt23[1] = 0xffffffff; + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, rdev->ring[idx].me, rdev->ring[idx].pipe, rdev->ring[idx].queue, 0); @@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); radeon_bo_kunmap(rdev->ring[idx].mqd_obj); radeon_bo_unreserve(rdev->ring[idx].mqd_obj); @@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&rdev->srbm_mutex); for (i = 0; i < 16; i++) { cik_srbm_select(rdev, 0, 0, 0, i); /* CP and shaders */ @@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* XXX SDMA RLC - todo */ } cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); cik_pcie_gart_tlb_flush(rdev); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) struct radeon_ring *ring; int r; + cik_mc_program(rdev); + if (rdev->flags & RADEON_IS_IGP) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { @@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) if (r) return r; - cik_mc_program(rdev); r = cik_pcie_gart_enable(rdev); if (r) return r; @@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cik_cp_enable(rdev, false); cik_sdma_enable(rdev, false); - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); cik_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); cik_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); @@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780f..7e5d0b570a30 100644 --- 
a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670c..d5b49e33315e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + evergreen_mc_program(rdev); + if (ASIC_IS_DCE5(rdev)) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { r = ni_init_microcode(rdev); @@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) if (r) return r; - evergreen_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { evergreen_agp_enable(rdev); } else { @@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); @@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index bb9ea3641312..b0e280058b9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); u32 base_rate = 24000; + u32 max_ratio = clock / base_rate; + u32 dto_phase; + u32 dto_modulo = clock; + u32 wallclock_ratio; + u32 dto_cntl; if (!dig || !dig->afmt) return; + if (max_ratio >= 8) { + dto_phase = 192 * 1000; + wallclock_ratio = 3; + } else if (max_ratio >= 4) { + dto_phase = 96 * 1000; + wallclock_ratio = 2; + } else if (max_ratio >= 2) { + dto_phase = 48 * 1000; + wallclock_ratio = 1; + } else { + dto_phase = 24 * 1000; + wallclock_ratio = 0; + } + dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= 
DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); + /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); + WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); } diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6c..0d582ac1dc31 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -497,6 +497,9 @@ #define DCCG_AUDIO_DTO0_MODULE 0x05b4 #define DCCG_AUDIO_DTO0_LOAD 0x05b8 #define DCCG_AUDIO_DTO0_CNTL 0x05bc +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 #define DCCG_AUDIO_DTO1_PHASE 0x05c0 #define DCCG_AUDIO_DTO1_MODULE 0x05c4 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4fe..ccb4f8b54852 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "ni_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); @@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + evergreen_mc_program(rdev); + if (rdev->flags & RADEON_IS_IGP) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = ni_init_microcode(rdev); @@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) if (r) return r; - evergreen_mc_program(rdev); r = cayman_pcie_gart_enable(rdev); if (r) return r; @@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 4f9b9bc20daa..f0f5f748938a 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -4067,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; struct ni_power_info *ni_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; @@ -4162,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) 
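/*
 * Standalone version of the wallclock-ratio selection used in the audio DTO
 * hunks above (illustrative only; values in kHz, names simplified): the ratio
 * is picked from how many times the 24 MHz reference fits into the pixel
 * clock, the DTO phase is scaled to match, and the module keeps the pixel
 * clock itself.
 */
#include <stdint.h>

struct dto_cfg {
	uint32_t phase;
	uint32_t module;
	uint32_t wallclock_ratio;
};

static struct dto_cfg audio_dto_cfg(uint32_t clock_khz)
{
	const uint32_t base_rate = 24000;	/* 24 MHz reference */
	uint32_t max_ratio = clock_khz / base_rate;
	struct dto_cfg c = { .module = clock_khz };

	if (max_ratio >= 8) {
		c.phase = 192 * 1000;
		c.wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		c.phase = 96 * 1000;
		c.wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		c.phase = 48 * 1000;
		c.wallclock_ratio = 1;
	} else {
		c.phase = 24 * 1000;
		c.wallclock_ratio = 0;
	}

	return c;
}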
eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -4188,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 10f712e37003..e66e72077350 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "smc: Bogus length %zu in firmware \"%s\"\n", rdev->smc_fw->size, fw_name); @@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) return 0; } -void r600_uvd_rbc_stop(struct radeon_device *rdev) +void r600_uvd_stop(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; /* force RBC into idle state */ WREG32(UVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + + /* put VCPU into reset */ + WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); + mdelay(5); + + /* disable VCPU clock */ + WREG32(UVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); + ring->ready = false; } @@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev) /* disable interupt */ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + /* put LMI, VCPU, RBC etc... 
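/*
 * Sketch of the shutdown ordering introduced by r600_uvd_stop() above
 * (illustrative only; reg_write()/reg_mask_write(), sleep_ms() and the
 * register/bit names are simplified stand-ins for the driver's WREG32,
 * WREG32_P, mdelay() and the UVD registers): quiesce the memory controller
 * and register bus first, then hold the VCPU in reset and gate its clock,
 * and only then release the stalls.
 */
#include <stdint.h>

#define VCPU_RESET_BIT (1u << 0)	/* placeholder for VCPU_SOFT_RESET */

static void reg_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }
static void reg_mask_write(uint32_t reg, uint32_t val, uint32_t mask)
{ (void)reg; (void)val; (void)mask; }
static void sleep_ms(unsigned ms) { (void)ms; }

enum { RBC_RB_CNTL, LMI_CTRL2, RB_ARB_CTRL, SOFT_RESET, VCPU_CNTL };

static void uvd_stop_sketch(void)
{
	reg_write(RBC_RB_CNTL, 0x11010101);		/* force the ring idle */

	/* quiesce the memory controller and register bus first ... */
	reg_mask_write(LMI_CTRL2, 1u << 8, ~(1u << 8));
	reg_mask_write(RB_ARB_CTRL, 1u << 3, ~(1u << 3));
	sleep_ms(1);

	/* ... then hold the VCPU in reset and gate its clock ... */
	reg_write(SOFT_RESET, VCPU_RESET_BIT);
	sleep_ms(5);
	reg_write(VCPU_CNTL, 0);

	/* ... and finally release the stalls. */
	reg_mask_write(LMI_CTRL2, 0, ~(1u << 8));
	reg_mask_write(RB_ARB_CTRL, 0, ~(1u << 3));
}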
into reset */ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | @@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev) WREG32(UVD_MPC_SET_ALU, 0); WREG32(UVD_MPC_SET_MUX, 0x88); - /* Stall UMC */ - WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); - /* take all subblocks out of reset, except VCPU */ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); mdelay(5); @@ -3312,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ r600_pcie_gen2_enable(rdev); + r600_mc_program(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -3324,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev) if (r) return r; - r600_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { r600_agp_enable(rdev); } else { diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f48240bb8c56..f264df5470f7 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; u32 base_rate = 24000; + u32 max_ratio = clock / base_rate; + u32 dto_phase; + u32 dto_modulo = clock; + u32 wallclock_ratio; + u32 dto_cntl; if (!dig || !dig->afmt) return; + if (max_ratio >= 8) { + dto_phase = 192 * 1000; + wallclock_ratio = 3; + } else if (max_ratio >= 4) { + dto_phase = 96 * 1000; + wallclock_ratio = 2; + } else if (max_ratio >= 2) { + dto_phase = 48 * 1000; + wallclock_ratio = 1; + } else { + dto_phase = 24 * 1000; + wallclock_ratio = 0; + } + /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. * doesn't matter which one you use. Just use the first one. */ @@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* according to the reg specs, this should DCE3.2 only, but in * practice it seems to cover DCE3.0 as well. 
*/ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); - WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ + if (dig->dig_encoder == 0) { + dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); + WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); + WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ + } else { + dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); + WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); + WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ + } } else { /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 8e3fe815edab..7c780839a7f4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -933,6 +933,9 @@ #define DCCG_AUDIO_DTO0_LOAD 0x051c # define DTO_LOAD (1 << 31) #define DCCG_AUDIO_DTO0_CNTL 0x0520 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 #define DCCG_AUDIO_DTO1_PHASE 0x0524 #define DCCG_AUDIO_DTO1_MODULE 0x0528 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f08219c39b6..274b8e1b889f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1468,7 +1468,6 @@ struct radeon_uvd { void *cpu_addr; uint64_t gpu_addr; void *saved_bo; - unsigned fw_size; atomic_t handles[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; struct delayed_work idle_work; @@ -2066,6 +2065,7 @@ struct radeon_device { const struct firmware *mec_fw; /* CIK MEC firmware */ const struct firmware *sdma_fw; /* CIK SDMA firmware */ const struct firmware *smc_fw; /* SMC firmware */ + const struct firmware *uvd_fw; /* UVD firmware */ struct r600_blit r600_blit; struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ @@ -2095,6 +2095,8 @@ struct radeon_device { /* ACPI interface */ struct radeon_atif atif; struct radeon_atcs atcs; + /* srbm instance registers */ + struct mutex srbm_mutex; }; int radeon_device_init(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 902479fa737f..3d61d5aac18f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -441,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde /* uvd */ int r600_uvd_init(struct radeon_device *rdev); int r600_uvd_rbc_start(struct radeon_device *rdev); -void r600_uvd_rbc_stop(struct radeon_device *rdev); +void r600_uvd_stop(struct radeon_device *rdev); int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_uvd_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..63398ae1dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev, 
mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); mutex_init(&rdev->gpu_clock_mutex); + mutex_init(&rdev->srbm_mutex); init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); @@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); + radeon_pm_suspend(rdev); radeon_suspend(rdev); for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -1564,6 +1566,7 @@ retry: } } + radeon_pm_resume(rdev); drm_helper_resume_force_mode(rdev->ddev); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ddb0efe2408..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } else { /* put fence directly behind firmware */ - index = ALIGN(rdev->uvd.fw_size, 8); + index = ALIGN(rdev->uvd_fw->size, 8); rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 6a51d943ccf4..b990b1a2bd50 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) if (rdev->gart.robj == NULL) { return; } - radeon_gart_table_vram_unpin(rdev); radeon_bo_unref(&rdev->gart.robj); } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f374c467aaca..c557850cd345 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_VERDE: case CHIP_OLAND: case CHIP_HAINAN: - if (radeon_dpm == 1) + /* DPM requires the RLC, RV770+ dGPU requires SMC */ + if (!rdev->rlc_fw) + rdev->pm.pm_method = PM_METHOD_PROFILE; + else if ((rdev->family >= CHIP_RV770) && + (!(rdev->flags & RADEON_IS_IGP)) && + (!rdev->smc_fw)) + rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (radeon_dpm == 1) rdev->pm.pm_method = PM_METHOD_DPM; else rdev->pm.pm_method = PM_METHOD_PROFILE; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 414fd145d20e..f1c15754e73c 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); int radeon_uvd_init(struct radeon_device *rdev) { - const struct firmware *fw; unsigned long bo_size; const char *fw_name; int i, r; @@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev) return -EINVAL; } - r = request_firmware(&fw, fw_name, rdev->dev); + r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); if (r) { dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", fw_name); return r; } - bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) + + bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); @@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev) radeon_bo_unreserve(rdev->uvd.vcpu_bo); - rdev->uvd.fw_size = fw->size; - memset(rdev->uvd.cpu_addr, 0, bo_size); - 
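/*
 * Condensed decision logic from the radeon_pm_init() hunk above (illustrative
 * only; the firmware pointers and chip checks are collapsed into plain ints):
 * DPM needs the RLC microcode, and RV770+ discrete parts additionally need
 * the SMC microcode; without them the driver falls back to the profile method.
 */
enum pm_method { PM_METHOD_PROFILE, PM_METHOD_DPM };

static enum pm_method pick_pm_method(int has_rlc_fw, int has_smc_fw,
				     int is_rv770_plus_dgpu, int dpm_requested)
{
	if (!has_rlc_fw)
		return PM_METHOD_PROFILE;
	if (is_rv770_plus_dgpu && !has_smc_fw)
		return PM_METHOD_PROFILE;
	return dpm_requested ? PM_METHOD_DPM : PM_METHOD_PROFILE;
}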
memcpy(rdev->uvd.cpu_addr, fw->data, fw->size); - - release_firmware(fw); - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { atomic_set(&rdev->uvd.handles[i], 0); rdev->uvd.filp[i] = NULL; @@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev) } radeon_bo_unref(&rdev->uvd.vcpu_bo); + + release_firmware(rdev->uvd_fw); } int radeon_uvd_suspend(struct radeon_device *rdev) { unsigned size; + void *ptr; + int i; if (rdev->uvd.vcpu_bo == NULL) return 0; + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) + if (atomic_read(&rdev->uvd.handles[i])) + break; + + if (i == RADEON_MAX_UVD_HANDLES) + return 0; + size = radeon_bo_size(rdev->uvd.vcpu_bo); + size -= rdev->uvd_fw->size; + + ptr = rdev->uvd.cpu_addr; + ptr += rdev->uvd_fw->size; + rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size); + memcpy(rdev->uvd.saved_bo, ptr, size); return 0; } int radeon_uvd_resume(struct radeon_device *rdev) { + unsigned size; + void *ptr; + if (rdev->uvd.vcpu_bo == NULL) return -EINVAL; + memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); + + size = radeon_bo_size(rdev->uvd.vcpu_bo); + size -= rdev->uvd_fw->size; + + ptr = rdev->uvd.cpu_addr; + ptr += rdev->uvd_fw->size; + if (rdev->uvd.saved_bo != NULL) { - unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo); - memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size); + memcpy(ptr, rdev->uvd.saved_bo, size); kfree(rdev->uvd.saved_bo); rdev->uvd.saved_bo = NULL; - } + } else + memset(ptr, 0, size); return 0; } @@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) { int i, r; for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (rdev->uvd.filp[i] == filp) { - uint32_t handle = atomic_read(&rdev->uvd.handles[i]); + uint32_t handle = atomic_read(&rdev->uvd.handles[i]); + if (handle != 0 && rdev->uvd.filp[i] == filp) { struct radeon_fence *fence; r = radeon_uvd_get_destroy_msg(rdev, @@ -337,8 +357,10 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, } r = radeon_bo_kmap(bo, &ptr); - if (r) + if (r) { + DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); return r; + } msg = ptr + offset; @@ -364,8 +386,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, radeon_bo_kunmap(bo); return 0; } else { - /* it's a create msg, no special handling needed */ radeon_bo_kunmap(bo); + + if (msg_type != 0) { + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; + } + + /* it's a create msg, no special handling needed */ } /* create or decode, validate the handle */ @@ -388,7 +416,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, int data0, int data1, - unsigned buf_sizes[]) + unsigned buf_sizes[], bool *has_msg_cmd) { struct radeon_cs_chunk *relocs_chunk; struct radeon_cs_reloc *reloc; @@ -417,7 +445,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, if (cmd < 0x4) { if ((end - start) < buf_sizes[cmd]) { - DRM_ERROR("buffer to small (%d / %d)!\n", + DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); return -EINVAL; } @@ -442,9 +470,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, } if (cmd == 0) { + if (*has_msg_cmd) { + DRM_ERROR("More than one message in a UVD-IB!\n"); + return -EINVAL; + } + *has_msg_cmd = true; r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); if (r) return r; + } else if (!*has_msg_cmd) { + 
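/*
 * Minimal standalone model of the radeon_uvd_suspend()/resume() split above
 * (illustrative; the open-handle check that lets suspend skip the copy is
 * omitted): the firmware image stays resident, so only the session area
 * behind it is saved, and resume re-copies the firmware and then restores or
 * zeroes the rest.
 */
#include <stdlib.h>
#include <string.h>

struct uvd_mem {
	unsigned char *vcpu;		/* start of the VCPU buffer */
	size_t vcpu_size;		/* total buffer size */
	const unsigned char *fw;	/* firmware image kept in memory */
	size_t fw_size;
	unsigned char *saved;		/* saved session area, NULL if none */
};

static int uvd_suspend_sketch(struct uvd_mem *m)
{
	size_t session = m->vcpu_size - m->fw_size;

	m->saved = malloc(session);
	if (!m->saved)
		return -1;
	memcpy(m->saved, m->vcpu + m->fw_size, session);
	return 0;
}

static void uvd_resume_sketch(struct uvd_mem *m)
{
	size_t session = m->vcpu_size - m->fw_size;

	memcpy(m->vcpu, m->fw, m->fw_size);	/* firmware first */
	if (m->saved) {
		memcpy(m->vcpu + m->fw_size, m->saved, session);
		free(m->saved);
		m->saved = NULL;
	} else {
		memset(m->vcpu + m->fw_size, 0, session);
	}
}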
DRM_ERROR("Message needed before other commands are send!\n"); + return -EINVAL; } return 0; @@ -453,7 +489,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, int *data0, int *data1, - unsigned buf_sizes[]) + unsigned buf_sizes[], + bool *has_msg_cmd) { int i, r; @@ -467,7 +504,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, *data1 = p->idx; break; case UVD_GPCOM_VCPU_CMD: - r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); + r = radeon_uvd_cs_reloc(p, *data0, *data1, + buf_sizes, has_msg_cmd); if (r) return r; break; @@ -488,6 +526,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) struct radeon_cs_packet pkt; int r, data0 = 0, data1 = 0; + /* does the IB has a msg command */ + bool has_msg_cmd = false; + /* minimum buffer sizes */ unsigned buf_sizes[] = { [0x00000000] = 2048, @@ -514,8 +555,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) return r; switch (pkt.type) { case RADEON_PACKET_TYPE0: - r = radeon_uvd_cs_reg(p, &pkt, &data0, - &data1, buf_sizes); + r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, + buf_sizes, &has_msg_cmd); if (r) return r; break; @@ -527,6 +568,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); + + if (!has_msg_cmd) { + DRM_ERROR("UVD-IBs need a msg command!\n"); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 363018c60412..bdd888b4db2b 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -1944,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) int rv6xx_dpm_init(struct radeon_device *rdev) { - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; + struct radeon_atom_ss ss; struct atom_clock_dividers dividers; struct rv6xx_power_info *pi; int ret; @@ -1989,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev) pi->gfx_clock_gating = true; - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; + pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_ENGINE_SS, 0); + pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_MEMORY_SS, 0); + + /* Disable sclk ss, causes hangs on a lot of systems */ + pi->sclk_ss = false; + + if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; + else pi->dynamic_ss = false; - } pi->dynamic_pcie_gen2 = true; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 30ea14e8854c..bcc68ec204ad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); @@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ rv770_pcie_gen2_enable(rdev); + rv770_mc_program(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -1841,7 +1843,6 @@ static int 
rv770_startup(struct radeon_device *rdev) if (r) return r; - rv770_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { rv770_agp_enable(rdev); } else { @@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev) int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); @@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 2d347925f77d..094c67a29d0d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) return 0; } +void rv770_get_engine_memory_ss(struct radeon_device *rdev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(rdev); + struct radeon_atom_ss ss; + + pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_ENGINE_SS, 0); + pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_MEMORY_SS, 0); + + if (pi->sclk_ss || pi->mclk_ss) + pi->dynamic_ss = true; + else + pi->dynamic_ss = false; +} + int rv770_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev) pi->mvdd_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = false; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = RV770_HASI_DFLT; @@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 96b1b2a62a8a..9244effc6b59 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h @@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, struct radeon_ps *new_ps, struct radeon_ps *old_ps); +void rv770_get_engine_memory_ss(struct radeon_device *rdev); /* smc */ int rv770_read_smc_soft_register(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6ca904673a4f..daa8d2df8ec5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev) snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw 
= NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "si_smc: Bogus length %zu in firmware \"%s\"\n", rdev->smc_fw->size, fw_name); @@ -6418,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev) /* enable aspm */ si_program_aspm(rdev); + si_mc_program(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || !rdev->rlc_fw || !rdev->mc_fw) { r = si_init_microcode(rdev); @@ -6437,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev) if (r) return r; - si_mc_program(rdev); r = si_pcie_gart_enable(rdev); if (r) return r; @@ -6621,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev) si_cp_enable(rdev, false); cayman_dma_stop(rdev); if (rdev->has_uvd) { - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); } si_irq_suspend(rdev); @@ -6763,8 +6768,10 @@ void si_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - if (rdev->has_uvd) + if (rdev->has_uvd) { + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); + } si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 7ad22e87cd62..88699e3cd868 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1767,7 +1767,7 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe s64 temperature, t_slope, t_intercept, av, bv, t_ref; s64 tmp; - i_leakage = drm_int2fixp(ileakage) / 100; + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); vddc = div64_s64(drm_int2fixp(v), 1000); temperature = div64_s64(drm_int2fixp(t), 1000); @@ -2903,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, { struct ni_ps *ps = ni_get_ps(rps); struct radeon_clock_and_voltage_limits *max_limits; - bool disable_mclk_switching; + bool disable_mclk_switching = false; + bool disable_sclk_switching = false; u32 mclk, sclk; u16 vddc, vddci; int i; @@ -2911,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if ((rdev->pm.dpm.new_active_crtc_count > 1) || ni_dpm_vblank_too_short(rdev)) disable_mclk_switching = true; - else - disable_mclk_switching = false; + + if (rps->vclk || rps->dclk) { + disable_mclk_switching = true; + disable_sclk_switching = true; + } if (rdev->pm.dpm.ac_power) max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; @@ -2940,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if (disable_mclk_switching) { mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; - sclk = ps->performance_levels[0].sclk; - vddc = ps->performance_levels[0].vddc; vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; } else { - sclk = ps->performance_levels[0].sclk; mclk = ps->performance_levels[0].mclk; - vddc = ps->performance_levels[0].vddc; vddci = ps->performance_levels[0].vddci; } + if (disable_sclk_switching) { + sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; + vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; + } else { + sclk = ps->performance_levels[0].sclk; + vddc = ps->performance_levels[0].vddc; + } + /* adjusted low state */ ps->performance_levels[0].sclk = sclk; ps->performance_levels[0].mclk = mclk; ps->performance_levels[0].vddc = vddc; ps->performance_levels[0].vddci = vddci; - for (i = 1; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) - 
ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; - if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) - ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; + if (disable_sclk_switching) { + sclk = ps->performance_levels[0].sclk; + for (i = 1; i < ps->performance_level_count; i++) { + if (sclk < ps->performance_levels[i].sclk) + sclk = ps->performance_levels[i].sclk; + } + for (i = 0; i < ps->performance_level_count; i++) { + ps->performance_levels[i].sclk = sclk; + ps->performance_levels[i].vddc = vddc; + } + } else { + for (i = 1; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) + ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; + if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) + ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; + } } if (disable_mclk_switching) { @@ -6253,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev) struct evergreen_power_info *eg_pi; struct ni_power_info *ni_pi; struct si_power_info *si_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; u32 mask; @@ -6346,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev) si_pi->vddc_phase_shed_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -6366,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev) eg_pi->sclk_deep_sleep = true; si_pi->sclk_deep_sleep_above_low = false; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 7a5764843bfb..cd33084c7860 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -488,8 +488,6 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) if (djrcv_dev->querying_devices) return 0; - djrcv_dev->querying_devices = true; - dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 0f34bca9f5e5..6099f50b28aa 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg, u16 value) { return i2c_smbus_write_byte_data(client, reg, value & 0xFF) - && i2c_smbus_write_byte_data(client, reg + 1, value >> 8); + || i2c_smbus_write_byte_data(client, reg + 1, value >> 8); } static void adt7470_init_client(struct i2c_client *client) diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index ccec916bc3eb..af8f65fb1c05 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c @@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c) bus_frequency = KEMPLD_I2C_FREQ_MAX; if (pld->info.spec_major == 1) - prescale = pld->pld_clock / bus_frequency * 5 - 1000; + 
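/*
 * Condensed form of the si_apply_state_adjust_rules() change above
 * (illustrative, types simplified): when UVD clocks are requested, engine
 * clock switching is disabled by raising every performance level to the
 * highest sclk in the state, with vddc taken from the top level.
 */
struct perf_level {
	unsigned sclk;
	unsigned vddc;
};

static void pin_levels_for_uvd(struct perf_level *lvl, int count)
{
	unsigned sclk = lvl[count - 1].sclk;	/* start from the top state */
	unsigned vddc = lvl[count - 1].vddc;
	int i;

	for (i = 0; i < count; i++)		/* a middle level could be higher */
		if (lvl[i].sclk > sclk)
			sclk = lvl[i].sclk;

	for (i = 0; i < count; i++) {
		lvl[i].sclk = sclk;
		lvl[i].vddc = vddc;
	}
}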
prescale = pld->pld_clock / (bus_frequency * 5) - 1000; else - prescale = pld->pld_clock / bus_frequency * 4 - 3000; + prescale = pld->pld_clock / (bus_frequency * 4) - 3000; if (prescale < 0) prescale = 0; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index df8ff5aea5b5..e2e9a0dade96 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, * based on this empirical measurement and a lot of previous frobbing. */ i2c->cmd_err = 0; - if (msg->len < 8) { + if (0) { /* disable PIO mode until a proper fix is made */ ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); if (ret) mxs_i2c_reset(i2c); diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 0ad208a69c29..3ceac3e91dde 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) { unsigned int stepconfig; int i, steps; - u32 step_en; /* * There are 16 configurable steps and 8 analog input @@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) adc_dev->channel_step[i] = steps; steps++; } - step_en = get_adc_step_mask(adc_dev); - am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); + } static const char * const chan_name_ain[] = { @@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct tiadc_device *adc_dev = iio_priv(indio_dev); - int i; - unsigned int fifo1count, read; + int i, map_val; + unsigned int fifo1count, read, stepid; u32 step = UINT_MAX; bool found = false; + u32 step_en; + unsigned long timeout = jiffies + usecs_to_jiffies + (IDLE_TIMEOUT * adc_dev->channels); + step_en = get_adc_step_mask(adc_dev); + am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); + + /* Wait for ADC sequencer to complete sampling */ + while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) { + if (time_after(jiffies, timeout)) + return -EAGAIN; + } + map_val = chan->channel + TOTAL_CHANNELS; /* * When the sub-system is first enabled, @@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); for (i = 0; i < fifo1count; i++) { read = tiadc_readl(adc_dev, REG_FIFO1); - if (read >> 16 == step) { - *val = read & 0xfff; + stepid = read & FIFOREAD_CHNLID_MASK; + stepid = stepid >> 0x10; + + if (stepid == map_val) { + read = read & FIFOREAD_DATA_MASK; found = true; + *val = read; } } - am335x_tsc_se_update(adc_dev->mfd_tscadc); + if (found == false) return -EBUSY; return IIO_VAL_INT; diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ea8a4146620d..0dd9bb873130 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name, void iio_trigger_poll(struct iio_trigger *trig, s64 time) { int i; - if (!trig->use_count) - for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) - if (trig->subirqs[i].enabled) { - trig->use_count++; + + if (!atomic_read(&trig->use_count)) { + atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); + + for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { + if (trig->subirqs[i].enabled) generic_handle_irq(trig->subirq_base + i); - } + else + iio_trigger_notify_done(trig); + } + } } EXPORT_SYMBOL(iio_trigger_poll); @@ -146,19 +151,24 @@ 
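/*
 * Standalone model (simplified, not the driver code) of the reworked IIO
 * trigger accounting above: use_count is charged for every possible consumer
 * before dispatch, slots that are not enabled hand their share straight back,
 * and the final decrement is what re-arms the trigger.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define CONSUMERS_PER_TRIGGER 2	/* stands in for CONFIG_IIO_CONSUMERS_PER_TRIGGER */

struct trig {
	atomic_int use_count;
	bool enabled[CONSUMERS_PER_TRIGGER];
	void (*handle)(struct trig *t, int consumer);	/* must end in trig_notify_done() */
	void (*try_reenable)(struct trig *t);
};

static void trig_notify_done(struct trig *t)
{
	if (atomic_fetch_sub(&t->use_count, 1) == 1 && t->try_reenable)
		t->try_reenable(t);		/* last consumer finished */
}

static void trig_poll(struct trig *t)
{
	int i;

	if (atomic_load(&t->use_count))
		return;				/* previous poll still in flight */

	atomic_store(&t->use_count, CONSUMERS_PER_TRIGGER);
	for (i = 0; i < CONSUMERS_PER_TRIGGER; i++) {
		if (t->enabled[i])
			t->handle(t, i);	/* consumer calls trig_notify_done() */
		else
			trig_notify_done(t);	/* unused slot returns its share */
	}
}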
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll); void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) { int i; - if (!trig->use_count) - for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) - if (trig->subirqs[i].enabled) { - trig->use_count++; + + if (!atomic_read(&trig->use_count)) { + atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); + + for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { + if (trig->subirqs[i].enabled) handle_nested_irq(trig->subirq_base + i); - } + else + iio_trigger_notify_done(trig); + } + } } EXPORT_SYMBOL(iio_trigger_poll_chained); void iio_trigger_notify_done(struct iio_trigger *trig) { - trig->use_count--; - if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable) + if (atomic_dec_and_test(&trig->use_count) && trig->ops && + trig->ops->try_reenable) if (trig->ops->try_reenable(trig)) /* Missed an interrupt so launch new poll now */ iio_trigger_poll(trig, 0); diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c index efdc873e58d1..a9857022f71d 100644 --- a/drivers/media/i2c/ml86v7667.c +++ b/drivers/media/i2c/ml86v7667.c @@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; + int ret = -EINVAL; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: @@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) break; } - return 0; + return ret; } static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index df4ada880e42..bd9405df1bd6 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids); #ifdef CONFIG_OF static const struct of_device_id coda_dt_ids[] = { - { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, + { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] }, { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, { /* sentinel */ } }; diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 553d87e5ceab..fd6289d60cde 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev) } *vfd = g2d_videodev; vfd->lock = &dev->mutex; + vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 5296385153d5..4f6dd42c9adb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) pix_mp->num_planes = 2; /* Set pixelformat to the format in which MFC outputs the decoded frame */ - pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; + pix_mp->pixelformat = ctx->dst_fmt->fourcc; pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; @@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) mfc_err("Unsupported format for source.\n"); return -EINVAL; } - if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { - mfc_err("Not 
supported format.\n"); + if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) { + mfc_err("Unknown codec\n"); return -EINVAL; } + if (!IS_MFCV6(dev)) { + if (fmt->fourcc == V4L2_PIX_FMT_VP8) { + mfc_err("Not supported format.\n"); + return -EINVAL; + } + } } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { fmt = find_format(f, MFC_FMT_RAW); if (!fmt) { @@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret = 0; - struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_mp; mfc_debug_enter(); @@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) goto out; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - fmt = find_format(f, MFC_FMT_RAW); - if (!fmt) { - mfc_err("Unsupported format for source.\n"); - return -EINVAL; - } - if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } else if (IS_MFCV6(dev) && - (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } - ctx->dst_fmt = fmt; - mfc_debug_leave(); - return ret; - } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - mfc_err("Wrong type error for S_FMT : %d", f->type); - return -EINVAL; - } - fmt = find_format(f, MFC_FMT_DEC); - if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) { - mfc_err("Unknown codec\n"); - ret = -EINVAL; + /* dst_fmt is validated by call to vidioc_try_fmt */ + ctx->dst_fmt = find_format(f, MFC_FMT_RAW); + ret = 0; goto out; - } - if (fmt->type != MFC_FMT_DEC) { - mfc_err("Wrong format selected, you should choose " - "format for decoding\n"); + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + /* src_fmt is validated by call to vidioc_try_fmt */ + ctx->src_fmt = find_format(f, MFC_FMT_DEC); + ctx->codec_mode = ctx->src_fmt->codec_mode; + mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); + pix_mp->height = 0; + pix_mp->width = 0; + if (pix_mp->plane_fmt[0].sizeimage) + ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; + else + pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = + DEF_CPB_SIZE; + pix_mp->plane_fmt[0].bytesperline = 0; + ctx->state = MFCINST_INIT; + ret = 0; + goto out; + } else { + mfc_err("Wrong type error for S_FMT : %d", f->type); ret = -EINVAL; goto out; } - if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } - ctx->src_fmt = fmt; - ctx->codec_mode = fmt->codec_mode; - mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); - pix_mp->height = 0; - pix_mp->width = 0; - if (pix_mp->plane_fmt[0].sizeimage) - ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; - else - pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = - DEF_CPB_SIZE; - pix_mp->plane_fmt[0].bytesperline = 0; - ctx->state = MFCINST_INIT; + out: mfc_debug_leave(); return ret; diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2549967b2f85..59e56f4c8ce3 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { + struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_fmt_mp = 
&f->fmt.pix_mp; @@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) return -EINVAL; } + if (!IS_MFCV6(dev)) { + if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) { + mfc_err("Not supported format.\n"); + return -EINVAL; + } + } else if (IS_MFCV6(dev)) { + if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) { + mfc_err("Not supported format.\n"); + return -EINVAL; + } + } + if (fmt->num_planes != pix_fmt_mp->num_planes) { mfc_err("failed to try output format\n"); return -EINVAL; @@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); - struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; int ret = 0; @@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) goto out; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - fmt = find_format(f, MFC_FMT_ENC); - if (!fmt) { - mfc_err("failed to set capture format\n"); - return -EINVAL; - } + /* dst_fmt is validated by call to vidioc_try_fmt */ + ctx->dst_fmt = find_format(f, MFC_FMT_ENC); ctx->state = MFCINST_INIT; - ctx->dst_fmt = fmt; ctx->codec_mode = ctx->dst_fmt->codec_mode; ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; pix_fmt_mp->plane_fmt[0].bytesperline = 0; @@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) } mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - fmt = find_format(f, MFC_FMT_RAW); - if (!fmt) { - mfc_err("failed to set output format\n"); - return -EINVAL; - } - - if (!IS_MFCV6(dev) && - (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } else if (IS_MFCV6(dev) && - (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { - mfc_err("Not supported format.\n"); - return -EINVAL; - } - - if (fmt->num_planes != pix_fmt_mp->num_planes) { - mfc_err("failed to set output format\n"); - ret = -EINVAL; - goto out; - } - ctx->src_fmt = fmt; + /* src_fmt is validated by call to vidioc_try_fmt */ + ctx->src_fmt = find_format(f, MFC_FMT_RAW); ctx->img_width = pix_fmt_mp->width; ctx->img_height = pix_fmt_mp->height; mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 4851cc2e4a4d..c4ff9739a7ae 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus, *eedata = data; *eedata_len = len; - dev_config = (void *)eedata; + dev_config = (void *)*eedata; switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { case 0: diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index cb694055ba7d..6e5070774dc2 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface, dev->workqueue = 0; + /* init video transfer queues first of all */ + /* to prevent oops in hdpvr_delete() on error paths */ + INIT_LIST_HEAD(&dev->free_buff_list); + INIT_LIST_HEAD(&dev->rec_buff_list); + /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); @@ -325,10 +330,6 @@ static int 
hdpvr_probe(struct usb_interface *interface, if (!dev->workqueue) goto error; - /* init video transfer queues */ - INIT_LIST_HEAD(&dev->free_buff_list); - INIT_LIST_HEAD(&dev->rec_buff_list); - dev->options = hdpvr_default_options; if (default_video_input < HDPVR_VIDEO_INPUTS) @@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface, video_nr[atomic_inc_return(&dev_nr)]); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); - goto error; + goto reg_fail; } /* let the user know what node this device is now attached to */ diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig index 8864436464bf..7c5b86006ee6 100644 --- a/drivers/media/usb/usbtv/Kconfig +++ b/drivers/media/usb/usbtv/Kconfig @@ -1,6 +1,6 @@ config VIDEO_USBTV tristate "USBTV007 video capture support" - depends on VIDEO_DEV + depends on VIDEO_V4L2 select VIDEOBUF2_VMALLOC ---help--- diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index bf43f874685e..91650173941a 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c @@ -57,7 +57,7 @@ #define USBTV_CHUNK_SIZE 256 #define USBTV_CHUNK 240 #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ - / 2 / USBTV_CHUNK) + / 4 / USBTV_CHUNK) /* Chunk header. */ #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ @@ -89,6 +89,7 @@ struct usbtv { /* Number of currently processed frame, useful find * out when a new one begins. */ u32 frame_id; + int chunks_done; int iso_size; unsigned int sequence; @@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv) return 0; } +/* Copy data from chunk into a frame buffer, deinterlacing the data + * into every second line. Unfortunately, they don't align nicely into + * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels. + * Therefore, we break down the chunk into two halves before copyting, + * so that we can interleave a line if needed. */ +static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd) +{ + int half; + + for (half = 0; half < 2; half++) { + int part_no = chunk_no * 2 + half; + int line = part_no / 3; + int part_index = (line * 2 + !odd) * 3 + (part_no % 3); + + u32 *dst = &frame[part_index * USBTV_CHUNK/2]; + memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src)); + src += USBTV_CHUNK/2; + } +} + /* Called for each 256-byte image chunk. * First word identifies the chunk, followed by 240 words of image * data and padding. */ @@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) frame_id = USBTV_FRAME_ID(chunk); odd = USBTV_ODD(chunk); chunk_no = USBTV_CHUNK_NO(chunk); - - /* Deinterlace. TODO: Use interlaced frame format. */ - chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3; - chunk_no += !odd * 3; - if (chunk_no >= USBTV_CHUNKS) return; /* Beginning of a frame. */ - if (chunk_no == 0) + if (chunk_no == 0) { usbtv->frame_id = frame_id; + usbtv->chunks_done = 0; + } + + if (usbtv->frame_id != frame_id) + return; spin_lock_irqsave(&usbtv->buflock, flags); if (list_empty(&usbtv->bufs)) { @@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); frame = vb2_plane_vaddr(&buf->vb, 0); - /* Copy the chunk. */ - memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1], - USBTV_CHUNK * sizeof(chunk[1])); + /* Copy the chunk data. 
*/ + usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd); + usbtv->chunks_done++; /* Last chunk in a frame, signalling an end */ - if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) { + if (odd && chunk_no == USBTV_CHUNKS-1) { int size = vb2_plane_size(&buf->vb, 0); + enum vb2_buffer_state state = usbtv->chunks_done == + USBTV_CHUNKS ? + VB2_BUF_STATE_DONE : + VB2_BUF_STATE_ERROR; buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; buf->vb.v4l2_buf.sequence = usbtv->sequence++; v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); vb2_set_plane_payload(&buf->vb, 0, size); - vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); + vb2_buffer_done(&buf->vb, state); list_del(&buf->list); } @@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq, if (*nbuffers < 2) *nbuffers = 2; *nplanes = 1; - sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32); + sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); return 0; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) if ((mc->ptr + rec_len) > mc->end) goto decode_failed; - memcpy(cf->data, mc->ptr, rec_len); + memcpy(cf->data, mc->ptr, cf->can_dlc); mc->ptr += rec_len; } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int work_done; - for (work_done = 0; work_done <= budget; work_done++) { + for (work_done = 0; work_done < budget; work_done++) { unsigned int *last_rx_bd = &priv->last_rx_bd; struct net_device_stats *stats = &priv->stats; struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 7ecb44ad24fb..126dec4342e6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1502,6 +1502,7 @@ struct bnx2x { #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) #define INTERRUPTS_ENABLED_FLAG (1 << 23) +#define BC_SUPPORTS_RMMOD_CMD (1 << 24) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1830,6 +1831,8 @@ struct bnx2x { int fp_array_size; u32 dump_preset_idx; + bool stats_started; + struct semaphore stats_sema; }; /* Tx queues may be less or equal to Rx queues */ @@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed { BNX2X_PCI_LINK_SPEED_5000 = 5000, BNX2X_PCI_LINK_SPEED_8000 = 8000 }; + +void bnx2x_set_local_cmng(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..f9122f2d6b65 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_update_ets_params(bp); + + /* ets may affect cmng configuration: reinit it in hw */ + bnx2x_set_local_cmng(bp); + bnx2x_dcbx_resume_hw_tx(bp); return; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 
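/*
 * The usbtv deinterlacing copy above, in isolation (names simplified): each
 * 240-word chunk is split into two 120-word half-lines, three half-lines make
 * up one 720-pixel field line, and the odd flag selects which of the two
 * interleaved output lines receives it.
 */
#include <stdint.h>
#include <string.h>

#define CHUNK_WORDS 240

static void chunk_to_frame(uint32_t *frame, const uint32_t *src,
			   int chunk_no, int odd)
{
	int half;

	for (half = 0; half < 2; half++) {
		int part_no = chunk_no * 2 + half;	/* half-line index */
		int line = part_no / 3;			/* line within the field */
		int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

		memcpy(&frame[part_index * CHUNK_WORDS / 2], src,
		       CHUNK_WORDS / 2 * sizeof(*src));
		src += CHUNK_WORDS / 2;
	}
}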
5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1300,6 +1300,9 @@ struct drv_func_mb { #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 @@ -1372,6 +1375,8 @@ struct drv_func_mb { #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 39d7db58bf86..7f4ec80f0cb3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) input.port_rate = bp->link_vars.line_speed; - if (cmng_type == CMNG_FNS_MINMAX) { + if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { int vn; /* read mf conf from shmem */ @@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp, } } +/* init cmng mode in HW according to local configuration */ +void bnx2x_set_local_cmng(struct bnx2x *bp) +{ + int cmng_fns = bnx2x_get_cmng_fns_mode(bp); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(bp, false, cmng_fns); + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + } else { + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode without fairness\n"); + } +} + /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } - if (bp->link_vars.link_up && bp->link_vars.line_speed) { - int cmng_fns = bnx2x_get_cmng_fns_mode(bp); - - if (cmng_fns != CMNG_FNS_NONE) { - bnx2x_cmng_fns_init(bp, false, cmng_fns); - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - } else - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "single function mode without fairness\n"); - } + if (bp->link_vars.link_up && bp->link_vars.line_speed) + bnx2x_set_local_cmng(bp); __bnx2x_link_report(bp); @@ -10360,6 +10366,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; + + bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
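/* Illustrative sketch, not from the patch above: it shows the pattern the
 * bnx2x hunks use, comparing the bootcode version read from the adapter
 * against a minimum and latching a capability flag so the rest of the driver
 * only tests the flag. The flag bit and the 0x0007080f threshold are taken
 * from the diff; the fake read_bootcode_version() helper is invented. */
#include <stdio.h>

#define BC_SUPPORTS_RMMOD_CMD	(1u << 24)
#define REQ_BC_VER_4_RMMOD_CMD	0x0007080f

static unsigned int read_bootcode_version(void)
{
	return 0x00070810;	/* pretend the adapter reports this */
}

int main(void)
{
	unsigned int flags = 0;
	unsigned int val = read_bootcode_version();

	flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? BC_SUPPORTS_RMMOD_CMD : 0;

	if (flags & BC_SUPPORTS_RMMOD_CMD)
		printf("bootcode 0x%08x: RMMOD command supported\n", val);
	else
		printf("bootcode 0x%08x: RMMOD command not supported\n", val);
	return 0;
}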
+ BC_SUPPORTS_RMMOD_CMD : 0; + boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; @@ -11522,6 +11532,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); + sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); @@ -12825,13 +12836,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, bnx2x_dcbnl_update_applist(bp, true); #endif + if (IS_PF(bp) && + !BP_NOMCP(bp) && + (bp->flags & BC_SUPPORTS_RMMOD_CMD)) + bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); + /* Close the interface - either directly or implicitly */ if (remove_netdev) { unregister_netdev(dev); } else { rtnl_lock(); - if (netif_running(dev)) - bnx2x_close(dev); + dev_close(dev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6291324913e9..1d925fd9cdc6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3471,7 +3471,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) alloc_mem_err: BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, sizeof(struct bnx2x_vf_mbx_msg)); - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, sizeof(union pf_vf_bulletin)); return -ENOMEM; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..d63d1327b051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) * Statistics service functions */ -static void bnx2x_stats_pmf_update(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_pmf_update(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; @@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) *stats_comp = 0; } -static void bnx2x_stats_start(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_start(struct bnx2x *bp) { /* vfs travel through here as part of the statistics FSM, but no action * is required @@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + + bp->stats_started = true; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_pmf_update(bp); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_pmf_update(bp); + up(&bp->stats_sema); } static void bnx2x_stats_restart(struct bnx2x *bp) @@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) */ if (IS_VF(bp)) return; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); + 
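/* Illustrative sketch, not from the patch above: the stats rework splits
 * each operation into a __locked helper that assumes the caller holds
 * stats_sema and a public wrapper that takes the semaphore with a timeout.
 * A rough POSIX analogue of that shape; the 100 ms timeout mirrors HZ/10,
 * the names are invented, and unlike the driver (which logs and presses on)
 * this demo simply bails out when the semaphore cannot be taken. */
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t stats_sema;
static int stats_started;

static void __stats_start(void)		/* caller must hold stats_sema */
{
	stats_started = 1;
}

static void stats_start(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec += 100 * 1000 * 1000;	/* ~HZ/10 */
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	if (sem_timedwait(&stats_sema, &ts) != 0) {
		fprintf(stderr, "unable to acquire stats lock: %d\n", errno);
		return;
	}
	__stats_start();
	sem_post(&stats_sema);
}

int main(void)
{
	sem_init(&stats_sema, 0, 1);
	stats_start();
	printf("stats_started=%d\n", stats_started);
	sem_destroy(&stats_sema);
	return 0;
}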
__bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) @@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) /* Make sure we use the value of the counter * used for sending the last stats ramrod. */ - spin_lock_bh(&bp->stats_lock); cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); /* are storm stats valid? */ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { @@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); - if (bnx2x_edebug_stats_stopped(bp)) + /* we run update from timer context, so give up + * if somebody is in the middle of transition + */ + if (down_trylock(&bp->stats_sema)) return; + if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) + goto out; + if (IS_PF(bp)) { if (*stats_comp != DMAE_COMP_VAL) - return; + goto out; if (bp->port.pmf) bnx2x_hw_stats_update(bp); @@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) BNX2X_ERR("storm stats were not updated for 3 times\n"); bnx2x_panic(); } - return; + goto out; } } else { /* vf doesn't collect HW statistics, and doesn't get completions @@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) /* vf is done */ if (IS_VF(bp)) - return; + goto out; if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; @@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + +out: + up(&bp->stats_sema); } static void bnx2x_port_stats_stop(struct bnx2x *bp) @@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) { int update = 0; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + + bp->stats_started = false; + bnx2x_stats_comp(bp); if (bp->port.pmf) @@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } + + up(&bp->stats_sema); } static void bnx2x_stats_do_nothing(struct bnx2x *bp) @@ -1376,15 +1416,17 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; + void (*action)(struct bnx2x *bp); if (unlikely(bp->panic)) return; spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; + action = bnx2x_stats_stm[state][event].action; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); + action(bp); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff28081146f2..95b8995187d7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17858,8 +17858,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, done: if (state == pci_channel_io_perm_failure) { - tg3_napi_enable(tp); - dev_close(netdev); + if (netdev) { + tg3_napi_enable(tp); + dev_close(netdev); + } err = PCI_ERS_RESULT_DISCONNECT; } else { pci_disable_device(pdev); @@ -17889,7 +17891,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rtnl_lock(); if (pci_enable_device(pdev)) { - netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); goto done; } @@ -17897,7 +17900,7 @@ static pci_ers_result_t 
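/* Illustrative sketch, not from the patch above: bnx2x_stats_handle() now
 * samples both the next state and the action pointer while holding
 * stats_lock and invokes the action only after dropping the lock, so the
 * call always matches the transition that was actually taken. A small
 * pthread analogue of that pattern with an invented two-state table: */
#include <pthread.h>
#include <stdio.h>

typedef void (*action_fn)(void);

static void act_update(void) { puts("update"); }
static void act_stop(void)   { puts("stop"); }

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;
static const struct { int next_state; action_fn action; } stm[2] = {
	{ 1, act_update },
	{ 0, act_stop },
};

static void handle_event(void)
{
	action_fn action;

	pthread_mutex_lock(&lock);
	action = stm[state].action;	/* sample while the state is stable */
	state = stm[state].next_state;
	pthread_mutex_unlock(&lock);

	action();			/* may log or sleep; runs unlocked */
}

int main(void)
{
	handle_event();
	handle_event();
	return 0;
}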
tg3_io_slot_reset(struct pci_dev *pdev) pci_restore_state(pdev); pci_save_state(pdev); - if (!netif_running(netdev)) { + if (!netdev || !netif_running(netdev)) { rc = PCI_ERS_RESULT_RECOVERED; goto done; } @@ -17909,7 +17912,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rc = PCI_ERS_RESULT_RECOVERED; done: - if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { + if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { tg3_napi_enable(tp); dev_close(netdev); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { - __free_pages(q->pg_chunk.page, order); - q->pg_chunk.page = NULL; - return -EIO; - } q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) return flits_to_desc(flits); } - -/* map_skb - map a packet main body and its page fragments - * @pdev: the PCI device - * @skb: the packet - * @addr: placeholder to save the mapped addresses - * - * map the main body of an sk_buff and its page fragments, if any. - */ -static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, - dma_addr_t *addr) -{ - const skb_frag_t *fp, *end; - const struct skb_shared_info *si; - - *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, *addr)) - goto out_err; - - si = skb_shinfo(skb); - end = &si->frags[si->nr_frags]; - - for (fp = si->frags; fp < end; fp++) { - *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), - DMA_TO_DEVICE); - if (pci_dma_mapping_error(pdev, *addr)) - goto unwind; - } - return 0; - -unwind: - while (fp-- > si->frags) - dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), - DMA_TO_DEVICE); - - pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); -out_err: - return -ENOMEM; -} - /** - * write_sgl - populate a scatter/gather list for a packet + * make_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL - * @addr: the list of the mapped addresses + * @pdev: the PCI device * - * Copies the scatter/gather list for the buffers that make up a packet + * Generates a scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. 
*/ -static inline unsigned int write_sgl(const struct sk_buff *skb, +static inline unsigned int make_sgl(const struct sk_buff *skb, struct sg_ent *sgp, unsigned char *start, - unsigned int len, const dma_addr_t *addr) + unsigned int len, struct pci_dev *pdev) { - unsigned int i, j = 0, k = 0, nfrags; + dma_addr_t mapping; + unsigned int i, j = 0, nfrags; if (len) { + mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); - sgp->addr[j++] = cpu_to_be64(addr[k++]); + sgp->addr[0] = cpu_to_be64(mapping); + j = 1; } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); - sgp->addr[j] = cpu_to_be64(addr[k++]); + sgp->addr[j] = cpu_to_be64(mapping); j ^= 1; if (j == 0) ++sgp; @@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, - unsigned int compl, const dma_addr_t *addr) + unsigned int compl) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; @@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, } sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); + sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), @@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* * The chip min packet length is 9 octets but play safe and reject @@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); @@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!skb_shared(skb))) skb_orphan(skb); - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } @@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc, - const dma_addr_t *addr) + unsigned int gen, unsigned int ndesc) { unsigned int sgl_flits, flits; struct work_request_hdr *from; @@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? 
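/* Illustrative sketch, not from the patch above: the cxgb3 scatter/gather
 * entry holds two length/address pairs, and make_sgl() toggles an index
 * between the two slots, advancing to the next entry only when both slots
 * are full. The struct below loosely mirrors that shape (the real sg_ent
 * uses big-endian fields); the fragment list and addresses are invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sg_ent {
	uint32_t len[2];
	uint64_t addr[2];
};

struct frag { uint64_t addr; uint32_t len; };

static unsigned int fill_sgl(struct sg_ent *sgl, const struct frag *frags,
			     unsigned int nfrags)
{
	struct sg_ent *sgp = sgl;
	unsigned int i, j = 0;

	for (i = 0; i < nfrags; i++) {
		sgp->len[j] = frags[i].len;
		sgp->addr[j] = frags[i].addr;
		j ^= 1;
		if (j == 0)
			++sgp;	/* both slots of this entry are used */
	}
	return i;
}

int main(void)
{
	struct frag frags[3] = {
		{ 0x1000, 64 }, { 0x2000, 1500 }, { 0x3000, 256 },
	};
	struct sg_ent sgl[2];
	unsigned int n;

	memset(sgl, 0, sizeof(sgl));
	n = fill_sgl(sgl, frags, 3);
	printf("%u fragments packed into %u entries\n", n, (n + 1) / 2);
	printf("third fragment: len=%u addr=0x%llx\n",
	       sgl[1].len[0], (unsigned long long)sgl[1].addr[0]);
	return 0;
}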
(struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), - skb_tail_pointer(skb) - - skb_transport_header(skb), addr); + sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), + skb->tail - skb->transport_header, + adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); goto again; } - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { - spin_unlock(&q->lock); - return NET_XMIT_SUCCESS; - } - gen = q->gen; q->in_use += ndesc; pidx = q->pidx; @@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); } spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } @@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; - unsigned int written = 0; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); @@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); break; } - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) - break; - gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; - written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; @@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, - (dma_addr_t *)skb->head); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); spin_lock(&q->lock); } spin_unlock(&q->lock); @@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); - if (likely(written)) - t3_write_reg(adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 5175f9729970..85923e2d63b9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3168,6 +3168,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) adapter->max_event_queues = le16_to_cpu(desc->eq_count); adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); + + /* Clear flags that driver is not interested in */ + adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; } err: mutex_unlock(&adapter->mbox_lock); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 9b916086c335..6237192a55d1 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -564,6 +564,12 @@ enum be_if_flags { BE_IF_FLAGS_MULTICAST = 0x1000 }; +#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ + BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ + BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ + BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ + BE_IF_FLAGS_UNTAGGED) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. 
*/ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) } /* Allocate and setup a new buffer for receiving */ -static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, - struct sk_buff *skb, unsigned int bufsize) +static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) { struct skge_rx_desc *rd = e->desc; - u64 map; + dma_addr_t map; map = pci_map_single(skge->hw->pdev, skb->data, bufsize, PCI_DMA_FROMDEVICE); - rd->dma_lo = map; - rd->dma_hi = map >> 32; + if (pci_dma_mapping_error(skge->hw->pdev, map)) + return -1; + + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); e->skb = skb; rd->csum1_start = ETH_HLEN; rd->csum2_start = ETH_HLEN; @@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, bufsize); + return 0; } /* Resume receiving using existing skb, @@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) return -ENOMEM; skb_reserve(skb, NET_IP_ALIGN); - skge_rx_setup(skge, e, skb, skge->rx_buf_size); + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { + dev_kfree_skb(skb); + return -EIO; + } } while ((e = e->next) != ring->start); ring->to_clean = ring->start; @@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) BUG_ON(skge->dma & 7); - if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); err = -EINVAL; goto free_pci_mem; @@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, struct skge_tx_desc *td; int i; u32 control, len; - u64 map; + dma_addr_t map; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; @@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, e->skb = skb; len = skb_headlen(skb); map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(hw->pdev, map)) + goto mapping_error; + dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, len); - td->dma_lo = map; - td->dma_hi = map >> 32; + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); if (skb->ip_summed == CHECKSUM_PARTIAL) { const int offset = skb_checksum_start_offset(skb); @@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; e = e->next; e->skb = skb; tf = e->desc; BUG_ON(tf->control & BMU_OWN); - tf->dma_lo = map; - tf->dma_hi = (u64) map >> 32; + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, skb_frag_size(frag)); @@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, } return NETDEV_TX_OK; + +mapping_unwind: + e = skge->tx_ring.to_use; + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + while (i-- > 0) { + 
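/* Illustrative sketch, not from the patch above: the skge hunks replace
 * open-coded ">> 32" arithmetic with lower_32_bits()/upper_32_bits() on a
 * dma_addr_t and reject rings whose memory straddles a 4 GiB boundary (the
 * descriptor's high word must be the same at both ends). A user-space
 * rendition with invented addresses and local helper definitions: */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

static int crosses_4g(uint64_t base, uint64_t size)
{
	return upper_32_bits(base) != upper_32_bits(base + size);
}

int main(void)
{
	uint64_t map = 0xffff0000ULL;		/* pretend DMA address */
	uint32_t dma_lo = lower_32_bits(map);
	uint32_t dma_hi = upper_32_bits(map);

	printf("desc: hi=0x%08" PRIx32 " lo=0x%08" PRIx32 "\n", dma_hi, dma_lo);
	printf("0x%" PRIx64 " + 0x80000 crosses 4G: %d\n",
	       map, crosses_4g(map, 0x80000));
	printf("0x100000000 + 0x1000 crosses 4G: %d\n",
	       crosses_4g(0x100000000ULL, 0x1000));
	return 0;
}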
e = e->next; + pci_unmap_page(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; } @@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, pci_dma_sync_single_for_cpu(skge->hw->pdev, dma_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(e->skb, skb->data, len); pci_dma_sync_single_for_device(skge->hw->pdev, dma_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); skge_rx_reuse(e, skge->rx_buf_size); } else { struct sk_buff *nskb; @@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, if (!nskb) goto resubmit; + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { + dev_kfree_skb(nskb); + goto resubmit; + } + pci_unmap_single(skge->hw->pdev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), PCI_DMA_FROMDEVICE); skb = e->skb; prefetch(skb->data); - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); } skb_put(skb, len); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -46,7 +46,7 @@ #include "mlx5_core.h" enum { - CMD_IF_REV = 4, + CMD_IF_REV = 5, }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_PAGE_REQUEST: { u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); - s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); + s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); mlx5_core_req_pages_handler(dev, func_id, npages); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; caps->log_max_mcg = out->hca_cap.log_max_mcg; - caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -55,33 +55,9 @@ enum { }; static DEFINE_SPINLOCK(health_lock); - static LIST_HEAD(health_list); static struct work_struct health_work; -static health_handler_t reg_handler; -int mlx5_register_health_report_handler(health_handler_t handler) -{ 
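/* Illustrative sketch, not from the patch above: the new mapping_unwind and
 * mapping_error labels in skge_xmit_frame() follow the usual rule that if
 * mapping fragment i fails, the head mapping and the i fragments mapped so
 * far are released, the packet is dropped, and the function still returns
 * "ok" so the stack does not retry forever. Fake map/unmap helpers stand in
 * for the PCI DMA API here: */
#include <stdio.h>

#define NFRAGS 4

static int map_one(int idx)
{
	return idx == 2 ? -1 : 0;	/* pretend the third mapping fails */
}

static void unmap_one(int idx)
{
	printf("unmapped %d\n", idx);
}

static int xmit(void)
{
	int i;

	if (map_one(-1))		/* the head buffer */
		goto mapping_error;

	for (i = 0; i < NFRAGS; i++)
		if (map_one(i))
			goto mapping_unwind;

	puts("sent");
	return 0;

mapping_unwind:
	while (i-- > 0)
		unmap_one(i);		/* fragments mapped so far */
	unmap_one(-1);			/* the head mapping */
mapping_error:
	puts("dropped (mapping error)");
	return 0;			/* consume the packet anyway */
}

int main(void)
{
	return xmit();
}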
- spin_lock_irq(&health_lock); - if (reg_handler) { - spin_unlock_irq(&health_lock); - return -EEXIST; - } - reg_handler = handler; - spin_unlock_irq(&health_lock); - - return 0; -} -EXPORT_SYMBOL(mlx5_register_health_report_handler); - -void mlx5_unregister_health_report_handler(void) -{ - spin_lock_irq(&health_lock); - reg_handler = NULL; - spin_unlock_irq(&health_lock); -} -EXPORT_SYMBOL(mlx5_unregister_health_report_handler); - static void health_care(struct work_struct *work) { struct mlx5_core_health *health, *n; @@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); + /* nothing yet */ spin_lock_irq(&health_lock); - if (reg_handler) - reg_handler(dev->pdev, health->health, - sizeof(health->health)); - list_del_init(&health->list); spin_unlock_irq(&health_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -43,10 +43,16 @@ enum { MLX5_PAGES_TAKE = 2 }; +enum { + MLX5_BOOT_PAGES = 1, + MLX5_INIT_PAGES = 2, + MLX5_POST_INIT_PAGES = 3 +}; + struct mlx5_pages_req { struct mlx5_core_dev *dev; u32 func_id; - s16 npages; + s32 npages; struct work_struct work; }; @@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { struct mlx5_query_pages_outbox { struct mlx5_outbox_hdr hdr; - __be16 num_boot_pages; + __be16 rsvd; __be16 func_id; - __be16 init_pages; - __be16 num_pages; + __be32 num_pages; }; struct mlx5_manage_pages_inbox { struct mlx5_inbox_hdr hdr; - __be16 rsvd0; + __be16 rsvd; __be16 func_id; - __be16 rsvd1; - __be16 num_entries; - u8 rsvd2[16]; + __be32 num_entries; __be64 pas[0]; }; struct mlx5_manage_pages_outbox { struct mlx5_outbox_hdr hdr; - u8 rsvd0[2]; - __be16 num_entries; - u8 rsvd1[20]; + __be32 num_entries; + u8 rsvd[4]; __be64 pas[0]; }; @@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) } static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, - s16 *pages, s16 *init_pages, u16 *boot_pages) + s32 *npages, int boot) { struct mlx5_query_pages_inbox in; struct mlx5_query_pages_outbox out; @@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + in.hdr.opmod = boot ? 
cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; @@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); - if (pages) - *pages = be16_to_cpu(out.num_pages); - - if (init_pages) - *init_pages = be16_to_cpu(out.init_pages); - - if (boot_pages) - *boot_pages = be16_to_cpu(out.num_boot_pages); - + *npages = be32_to_cpu(out.num_pages); *func_id = be16_to_cpu(out.func_id); return err; @@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); in->func_id = cpu_to_be16(func_id); - in->num_entries = cpu_to_be16(npages); + in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); mlx5_core_dbg(dev, "err %d\n", err); if (err) { @@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); in.func_id = cpu_to_be16(func_id); - in.num_entries = cpu_to_be16(npages); + in.num_entries = cpu_to_be32(npages); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); if (err) { @@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, goto out_free; } - num_claimed = be16_to_cpu(out->num_entries); + num_claimed = be32_to_cpu(out->num_entries); if (nclaimed) *nclaimed = num_claimed; @@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) } void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s16 npages) + s32 npages) { struct mlx5_pages_req *req; @@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { - u16 uninitialized_var(boot_pages); - s16 uninitialized_var(init_pages); u16 uninitialized_var(func_id); + s32 uninitialized_var(npages); int err; - err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, - &boot_pages); + err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); if (err) return err; + mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", + npages, boot ? "boot" : "init", func_id); - mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", - init_pages, boot_pages, func_id); - return give_pages(dev, func_id, boot ? 
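/* Illustrative sketch, not from the patch above: the mlx5 command-layout
 * changes widen the page counts from 16-bit to 32-bit big-endian fields.
 * With a __be16 field, a request for more than 65535 pages silently
 * truncates, and can even turn negative when stored in an s16. A quick
 * user-space illustration using htons/htonl as the byte-order analogues: */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t requested = 100000;			/* pages the device asks for */
	uint32_t wire32 = htonl(requested);		/* new __be32 field */
	uint16_t wire16 = htons((uint16_t)requested);	/* old __be16 field */

	int32_t npages32 = (int32_t)ntohl(wire32);
	int16_t npages16 = (int16_t)ntohs(wire16);

	printf("requested %u pages: be32 decode = %d, be16 decode = %d\n",
	       (unsigned int)requested, npages32, npages16);
	return 0;
}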
boot_pages : init_pages, 0); + return give_pages(dev, func_id, npages, 0); } static int optimal_reclaimed_pages(void) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 78ca75576b9e..e53caaaf1303 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3272,6 +3272,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) u8 val; int ret, max_sds_rings = adapter->max_sds_rings; + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); + return -EBUSY; + } + if (qlcnic_get_diag_lock(adapter)) { netdev_info(netdev, "Device in diagnostics mode\n"); return -EBUSY; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 17c26a1158a0..c97e2e07b279 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -633,7 +633,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) return -EIO; } - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); qlcnic_83xx_idc_attach_driver(adapter); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b8242bc0293b..a780b73e8b2b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2161,7 +2161,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_disable_mbx_intr; - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); pci_set_drvdata(pdev, adapter); @@ -3083,7 +3084,8 @@ done: adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_RESETTING, &adapter->state); - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); if (!qlcnic_clr_drv_state(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { err = qlcnic_get_beacon_state(adapter, &h_beacon_state); - if (!err) { - dev_info(&adapter->pdev->dev, - "Failed to get current beacon state\n"); + if (err) { + netdev_err(adapter->netdev, + "Failed to get current beacon state\n"); } else { if (h_beacon_state == QLCNIC_BEACON_DISABLE) ahw->beacon_state = 0; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -524,6 +524,7 @@ rx_status_loop: PCI_DMA_FROMDEVICE); if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { dev->stats.rx_dropped++; + kfree_skb(new_skb); goto rx_next; } diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -33,10 +33,15 @@ static 
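/* Illustrative sketch, not from the patch above: the qlcnic interrupt-test
 * hunk refuses to run diagnostics while the adapter is resetting and returns
 * -EBUSY instead of racing with the reset path. A tiny state-bit analogue;
 * the bit name echoes the driver, the rest is invented for the demo. */
#include <errno.h>
#include <stdio.h>

#define QLCNIC_RESETTING_BIT 0

static unsigned long adapter_state;

static int test_bit_ul(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

static int interrupt_test(void)
{
	if (test_bit_ul(QLCNIC_RESETTING_BIT, &adapter_state)) {
		fprintf(stderr, "device is resetting\n");
		return -EBUSY;
	}
	puts("running interrupt test");
	return 0;
}

int main(void)
{
	interrupt_test();			/* runs */
	adapter_state |= 1UL << QLCNIC_RESETTING_BIT;
	printf("second attempt: %d\n", interrupt_test());
	return 0;
}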
unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; unsigned int entry = priv->cur_tx % txsize; - struct dma_desc *desc = priv->dma_tx + entry; + struct dma_desc *desc; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax, len; + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + if (priv->plat->enh_desc) bmax = BUF_SIZE_8KiB; else @@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) STMMAC_RING_MODE); wmb(); entry = (++priv->cur_tx) % txsize; - desc = priv->dma_tx + entry; + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; desc->des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, GFP_KERNEL); - if (unlikely(skb == NULL)) { + if (!skb) { pr_err("%s: Rx init fails; skb is NULL\n", __func__); - return 1; + return -ENOMEM; } skb_reserve(skb, NET_IP_ALIGN); priv->rx_skbuff[i] = skb; priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, priv->dma_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { + pr_err("%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } p->des2 = priv->rx_skbuff_dma[i]; @@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return 0; } +static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) +{ + if (priv->rx_skbuff[i]) { + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + } + priv->rx_skbuff[i] = NULL; +} + /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure @@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, * and allocates the socket buffers. It suppors the chained and ring * modes. */ -static void init_dma_desc_rings(struct net_device *dev) +static int init_dma_desc_rings(struct net_device *dev) { int i; struct stmmac_priv *priv = netdev_priv(dev); unsigned int txsize = priv->dma_tx_size; unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize = 0; + int ret = -ENOMEM; /* Set the max buffer size according to the DESC mode * and the MTU. Note that RING mode allows 16KiB bsize. 
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) dma_extended_desc), &priv->dma_rx_phy, GFP_KERNEL); + if (!priv->dma_erx) + goto err_dma; + priv->dma_etx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_extended_desc), &priv->dma_tx_phy, GFP_KERNEL); - if ((!priv->dma_erx) || (!priv->dma_etx)) - return; + if (!priv->dma_etx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + goto err_dma; + } } else { priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma; + priv->dma_tx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); - if ((!priv->dma_rx) || (!priv->dma_tx)) - return; + if (!priv->dma_tx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + goto err_dma; + } } priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), GFP_KERNEL); + if (!priv->rx_skbuff_dma) + goto err_rx_skbuff_dma; + priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), GFP_KERNEL); + if (!priv->rx_skbuff) + goto err_rx_skbuff; + priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), GFP_KERNEL); + if (!priv->tx_skbuff_dma) + goto err_tx_skbuff_dma; + priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), GFP_KERNEL); + if (!priv->tx_skbuff) + goto err_tx_skbuff; + if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); @@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) else p = priv->dma_rx + i; - if (stmmac_init_rx_buffers(priv, p, i)) - break; + ret = stmmac_init_rx_buffers(priv, p, i); + if (ret) + goto err_init_rx_buffers; if (netif_msg_probe(priv)) pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], @@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) if (netif_msg_hw(priv)) stmmac_display_rings(priv); + + return 0; +err_init_rx_buffers: + while (--i >= 0) + stmmac_free_rx_buffers(priv, i); + kfree(priv->tx_skbuff); +err_tx_skbuff: + kfree(priv->tx_skbuff_dma); +err_tx_skbuff_dma: + kfree(priv->rx_skbuff); +err_rx_skbuff: + kfree(priv->rx_skbuff_dma); +err_rx_skbuff_dma: + if (priv->extend_desc) { + dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_extended_desc), + priv->dma_etx, priv->dma_tx_phy); + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + } else { + dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + } +err_dma: + return ret; } static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_rx_size; i++) { - if (priv->rx_skbuff[i]) { - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); - } - priv->rx_skbuff[i] = NULL; - } + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffers(priv, i); } static void dma_free_tx_skbufs(struct stmmac_priv *priv) @@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); priv->dma_rx_size = 
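/* Illustrative sketch, not from the patch above: init_dma_desc_rings() now
 * returns an errno and unwinds through a ladder of goto labels, freeing in
 * reverse order of allocation, instead of silently returning when an
 * allocation fails. The same shape reduced to user space with calloc/free
 * and invented structure names: */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct rings { void *rx_ring, *tx_ring, *rx_skbuff, *tx_skbuff; };

static int setup_rings(struct rings *r, size_t rxsize, size_t txsize)
{
	r->rx_ring = calloc(rxsize, 16);
	if (!r->rx_ring)
		goto err_rx_ring;
	r->tx_ring = calloc(txsize, 16);
	if (!r->tx_ring)
		goto err_tx_ring;
	r->rx_skbuff = calloc(rxsize, sizeof(void *));
	if (!r->rx_skbuff)
		goto err_rx_skbuff;
	r->tx_skbuff = calloc(txsize, sizeof(void *));
	if (!r->tx_skbuff)
		goto err_tx_skbuff;
	return 0;

err_tx_skbuff:
	free(r->rx_skbuff);
err_rx_skbuff:
	free(r->tx_ring);
err_tx_ring:
	free(r->rx_ring);
err_rx_ring:
	return -ENOMEM;
}

static void free_rings(struct rings *r)
{
	free(r->tx_skbuff);
	free(r->rx_skbuff);
	free(r->tx_ring);
	free(r->rx_ring);
}

int main(void)
{
	struct rings r;

	if (setup_rings(&r, 128, 128) < 0) {
		fprintf(stderr, "setup_rings failed\n");
		return EXIT_FAILURE;
	}
	puts("rings ready");
	free_rings(&r);
	return EXIT_SUCCESS;
}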
STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); - init_dma_desc_rings(dev); + + ret = init_dma_desc_rings(dev); + if (ret < 0) { + pr_err("%s: DMA descriptors initialization failed\n", __func__); + goto dma_desc_error; + } /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { - pr_err("%s: DMA initialization failed\n", __func__); + pr_err("%s: DMA engine initialization failed\n", __func__); goto init_error; } @@ -1672,6 +1744,7 @@ wolirq_error: init_error: free_dma_desc_resources(priv); +dma_desc_error: if (priv->phydev) phy_disconnect(priv->phydev); phy_error: diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index ef776310fab1..d022bf936572 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - netif_rx(skb); + netif_receive_skb(skb); stats->rx_bytes += pkt_len; stats->rx_packets++; @@ -2904,6 +2904,7 @@ out: return ret; err_iounmap: + netif_napi_del(&vptr->napi); iounmap(regs); err_free_dev: free_netdev(netdev); @@ -2924,6 +2925,7 @@ static int velocity_remove(struct device *dev) struct velocity_info *vptr = netdev_priv(netdev); unregister_netdev(netdev); + netif_napi_del(&vptr->napi); iounmap(vptr->mac_regs); free_netdev(netdev); velocity_nics--; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a822207b8241..510a9b60fde1 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -742,6 +742,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) return -EADDRNOTAVAIL; } + if (data && data[IFLA_MACVLAN_FLAGS] && + nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) + return -EINVAL; + if (data && data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 8f6056d9ba97..1c6e1116eb0a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -715,10 +715,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } - if (vlan) + if (vlan) { + local_bh_disable(); macvlan_start_xmit(skb, vlan->dev); - else + local_bh_enable(); + } else { kfree_skb(skb); + } rcu_read_unlock(); return total_len; @@ -809,8 +812,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, done: rcu_read_lock(); vlan = rcu_dereference(q->vlan); - if (vlan) + if (vlan) { + preempt_disable(); macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); + preempt_enable(); + } rcu_read_unlock(); return ret ? 
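/* Illustrative sketch, not from the patch above: the macvlan hunk validates
 * user-supplied netlink flags by masking off every bit the driver knows
 * about and rejecting the request if anything is left, which keeps the
 * remaining bits free for future extensions. In miniature (the flag value
 * here is only assumed to match the uapi definition): */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MACVLAN_FLAG_NOPROMISC 1	/* only flag currently understood */

static int validate_flags(uint16_t flags)
{
	if (flags & ~MACVLAN_FLAG_NOPROMISC)
		return -EINVAL;		/* unknown bits set */
	return 0;
}

int main(void)
{
	printf("flags 0x0001 -> %d\n", validate_flags(0x0001));
	printf("flags 0x0003 -> %d\n", validate_flags(0x0003));
	return 0;
}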
ret : copied; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 978d8654b14a..7ed13cc0dcb2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -977,8 +977,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, u32 rxhash; if (!(tun->flags & TUN_NO_PI)) { - if ((len -= sizeof(pi)) > total_len) + if (len < sizeof(pi)) return -EINVAL; + len -= sizeof(pi); if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) return -EFAULT; @@ -986,8 +987,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (tun->flags & TUN_VNET_HDR) { - if ((len -= tun->vnet_hdr_sz) > total_len) + if (len < tun->vnet_hdr_sz) return -EINVAL; + len -= tun->vnet_hdr_sz; if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) return -EFAULT; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8bf31d9a397c..570ad7aa7204 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1430,7 +1430,7 @@ static int vxlan_open(struct net_device *dev) return -ENOTCONN; if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && - ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { + vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { vxlan_sock_hold(vs); dev_hold(dev); queue_work(vxlan_wq, &vxlan->igmp_join); @@ -1837,8 +1837,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); - flush_workqueue(vxlan_wq); - spin_lock(&vn->sock_lock); hlist_del_rcu(&vxlan->hlist); spin_unlock(&vn->sock_lock); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dbdc5f7e2b29..01e264fb50e0 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus) /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { - struct pci_dev * pci_dev; - u64 addr; + struct pci_dev *pci_dev = to_pci_dev(dev); + bool is_bridge; + u64 addr; - pci_dev = to_pci_dev(dev); + /* + * pci_is_bridge() is not suitable here, because pci_dev->subordinate + * is set only after acpi_pci_find_device() has been called for the + * given device. + */ + is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE + || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); + *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); if (!*handle) return -ENODEV; return 0; diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 767fee2ab340..26019531db15 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/of_device.h> @@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) } #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ -static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) +static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) { + int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ /* - * The datasheet doesn't say which way round the - * NEW_REGS/STALE_REGS bitfields go. 
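/* Illustrative sketch, not from the patch above: the tun hunks replace
 * "subtract first, then compare" with an explicit bounds check before the
 * subtraction. With unsigned size_t lengths the subtraction wraps around
 * rather than going negative, so checking up front is the form that is easy
 * to reason about. The struct below is invented for the demo: */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct pkt_info { unsigned short flags, proto; };

static int parse_header(size_t len)
{
	if (len < sizeof(struct pkt_info))
		return -EINVAL;		/* too short for the header */
	len -= sizeof(struct pkt_info);
	printf("payload length: %zu\n", len);
	return 0;
}

int main(void)
{
	printf("len=2: %d\n", parse_header(2));
	printf("len=64: %d\n", parse_header(64));
	/* "len -= sizeof(...)" on the 2-byte input would have wrapped to a
	 * huge size_t value instead of a negative number */
	return 0;
}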
In fact it's 0x1=P0, - * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS + * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 + * states: + * | The order in which registers are updated is + * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. + * | (This list is in bitfield order, from LSB to MSB, as they would + * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT + * | register. For example, the Seconds register corresponds to + * | STALE_REGS or NEW_REGS containing 0x80.) */ - while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & - (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) - cpu_relax(); + do { + if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & + (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) + return 0; + udelay(1); + } while (--timeout > 0); + return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & + (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0; } /* Time read/write */ static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { + int ret; struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); - stmp3xxx_wait_time(rtc_data); + ret = stmp3xxx_wait_time(rtc_data); + if (ret) + return ret; + rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); return 0; } @@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t) struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); - stmp3xxx_wait_time(rtc_data); - return 0; + return stmp3xxx_wait_time(rtc_data); } /* interrupt(s) handler */ diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 17150a778984..451bf99582ff 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) rc = cqr->intrc; else rc = -EIO; + + /* kick tasklets */ + dasd_schedule_device_bh(device); + if (device->block) + dasd_schedule_block_bh(device->block); + return rc; } diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index b6d1f92ed33c..c18c68150e9f 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -38,7 +38,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.22" +#define DRV_VERSION "1.5.0.23" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5f09d1814d26..42e15ee6e1bb 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); INIT_WORK(&fnic->event_work, fnic_handle_event); skb_queue_head_init(&fnic->fip_frame_queue); - spin_lock_irqsave(&fnic_list_lock, flags); - if (!fnic_fip_queue) { - fnic_fip_queue = - create_singlethread_workqueue("fnic_fip_q"); - if (!fnic_fip_queue) { - spin_unlock_irqrestore(&fnic_list_lock, flags); - printk(KERN_ERR PFX "fnic FIP work queue " - "create failed\n"); - err = -ENOMEM; - goto err_out_free_max_pool; - } - } - spin_unlock_irqrestore(&fnic_list_lock, flags); INIT_LIST_HEAD(&fnic->evlist); INIT_LIST_HEAD(&fnic->vlans); } else { @@ -960,6 +947,13 @@ static int __init fnic_init_module(void) spin_lock_init(&fnic_list_lock); INIT_LIST_HEAD(&fnic_list); + fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + printk(KERN_ERR PFX "fnic FIP work queue create failed\n"); + err = -ENOMEM; + goto err_create_fip_workq; + } + 
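/* Illustrative sketch, not from the patch above: stmp3xxx_wait_time() used
 * to spin forever on the STALE bit; the patch bounds the wait (about
 * 5000 x 1 us) and reports -ETIME if the bit never clears. The same
 * bounded-poll shape with a fake register read standing in for readl(): */
#include <errno.h>
#include <stdio.h>

#define STALE_BIT 0x80

static unsigned int fake_stat;

static unsigned int read_stat(void)
{
	/* pretend the hardware clears the bit after a few polls */
	if (fake_stat && --fake_stat == 0)
		return 0;
	return STALE_BIT;
}

static int wait_not_stale(void)
{
	int timeout = 5000;

	do {
		if (!(read_stat() & STALE_BIT))
			return 0;
		/* udelay(1) would go here in the driver */
	} while (--timeout > 0);

	return (read_stat() & STALE_BIT) ? -ETIME : 0;
}

int main(void)
{
	fake_stat = 3;			/* clears after three polls */
	printf("wait 1: %d\n", wait_not_stale());
	fake_stat = 0;			/* never clears */
	printf("wait 2: %d\n", wait_not_stale());
	return 0;
}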
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); @@ -978,6 +972,8 @@ static int __init fnic_init_module(void) err_pci_register: fc_release_transport(fnic_fc_transport); err_fc_transport: + destroy_workqueue(fnic_fip_queue); +err_create_fip_workq: destroy_workqueue(fnic_event_queue); err_create_fnic_workq: kmem_cache_destroy(fnic_io_req_cache); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0177295599e0..1f0ca68409d4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance) break; } - /* - * We expect the FW state to be READY - */ - if (megasas_transition_to_ready(instance, 0)) - goto fail_ready_state; + if (megasas_transition_to_ready(instance, 0)) { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + dev_info(&instance->pdev->dev, + "megasas: FW restarted successfully from %s!\n", + __func__); + + /*waitting for about 30 second before retry*/ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } /* * MSI-X host index 0 is common for all adapter. diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3b1ea34e1f5a..eaa808e6ba91 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, { int i, result; + if (sdev->skip_vpd_pages) + goto fail; + /* Ask for all the pages supported by this device */ result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); if (result) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 2168258fb2c3..74b88efde6ad 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) vscsi->affinity_hint_set = true; } else { - for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++) + for (i = 0; i < vscsi->num_queues; i++) virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); vscsi->affinity_hint_set = false; diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 222d3e37fc28..707966bd5610 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) else buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, - t->len, DMA_FROM_DEVICE); + t->len, DMA_TO_DEVICE); if (!t->tx_dma) { ret = -EFAULT; goto err_tx_map; diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index dcceed29d31a..81972fa47beb 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1811,10 +1811,12 @@ static int zcache_comp_init(void) #else if (*zcache_comp_name != '\0') { ret = crypto_has_comp(zcache_comp_name, 0, 0); - if (!ret) + if (!ret) { pr_info("zcache: %s not supported\n", zcache_comp_name); - goto out; + ret = 1; + goto out; + } } if (!ret) strcpy(zcache_comp_name, "lzo"); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 609dbc2f7151..83b4ef4dfcf8 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf, /* 
Determine if it is a Rigol or not */ data->rigol_quirk = 0; dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", - data->usb_dev->descriptor.idVendor, - data->usb_dev->descriptor.idProduct); + le16_to_cpu(data->usb_dev->descriptor.idVendor), + le16_to_cpu(data->usb_dev->descriptor.idProduct)); for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { - if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && - (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { + if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && + (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); data->rigol_quirk = 1; break; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 4a8a1d68002c..558313de4911 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4798,7 +4798,8 @@ static void hub_events(void) hub->ports[i - 1]->child; dev_dbg(hub_dev, "warm reset port %d\n", i); - if (!udev) { + if (!udev || !(portstatus & + USB_PORT_STAT_CONNECTION)) { status = hub_port_reset(hub, i, NULL, HUB_BH_RESET_TIME, true); @@ -4808,8 +4809,8 @@ static void hub_events(void) usb_lock_device(udev); status = usb_reset_device(udev); usb_unlock_device(udev); + connect_change = 0; } - connect_change = 0; } if (connect_change) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a63598895077..5b44cd47da5b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04d8, 0x000c), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* CarrolTouch 4000U */ + { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* CarrolTouch 4500U */ + { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ { USB_DEVICE(0x04e8, 0x6601), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index f80d0330d548..8e3c878f38cf 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1391,21 +1391,20 @@ iso_stream_schedule ( /* Behind the scheduling threshold? */ if (unlikely(start < next)) { + unsigned now2 = (now - base) & (mod - 1); /* USB_ISO_ASAP: Round up to the first available slot */ if (urb->transfer_flags & URB_ISO_ASAP) start += (next - start + period - 1) & -period; /* - * Not ASAP: Use the next slot in the stream. If - * the entire URB falls before the threshold, fail. + * Not ASAP: Use the next slot in the stream, + * no matter what. 
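/* Illustrative sketch, not from the patch above: several of these hunks wrap
 * descriptor fields in le16_to_cpu(), because idVendor and idProduct are
 * stored little-endian on the wire and comparing or printing them raw only
 * happens to work on little-endian CPUs. A small decoder over raw descriptor
 * bytes; the ID values are invented: */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_decode(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* two consecutive little-endian 16-bit fields, e.g. idVendor/idProduct */
	const uint8_t desc[] = { 0xa6, 0x1a, 0x01, 0xe1 };
	uint16_t vendor  = le16_decode(&desc[0]);
	uint16_t product = le16_decode(&desc[2]);

	printf("Vendor 0x%04X Product 0x%04X\n", vendor, product);
	return 0;
}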
*/ - else if (start + span - period < next) { - ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", + else if (start + span - period < now2) { + ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", urb, start + base, - span - period, next + base); - status = -EXDEV; - goto fail; + span - period, now2 + base); } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index df6978abd7e6..6f8c2fd47675 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -24,6 +24,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/dmapool.h> +#include <linux/dma-mapping.h> #include "xhci.h" diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 41eb4fc33453..9478caa2f71f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -27,6 +27,7 @@ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/dmi.h> +#include <linux/dma-mapping.h> #include "xhci.h" diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index eb3c8c142fa9..eeb27208c0d1 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface, /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", - udev->descriptor.idProduct, dev->serial_number, + le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, (dev->minor - ADU_MINOR_BASE)); exit: dbg(2, " %s : leave, return value %p (dev)", __func__, dev); diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 5a979729f8ec..58c17fdc85eb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial) if (d_details == NULL) { dev_err(&serial->dev->dev, "%s - unknown product id %x\n", __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); - return 1; + return -ENODEV; } /* Setup private data for serial driver */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 51da424327b0..b01300164fc0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -90,6 +90,7 @@ struct urbtracker { struct list_head urblist_entry; struct kref ref_count; struct urb *urb; + struct usb_ctrlrequest *setup; }; enum mos7715_pp_modes { @@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref) struct mos7715_parport *mos_parport = urbtrack->mos_parport; usb_free_urb(urbtrack->urb); + kfree(urbtrack->setup); kfree(urbtrack); kref_put(&mos_parport->ref_count, destroy_mos_parport); } @@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, struct urbtracker *urbtrack; int ret_val; unsigned long flags; - struct usb_ctrlrequest setup; struct usb_serial *serial = mos_parport->serial; struct usb_device *usbdev = serial->dev; @@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, kfree(urbtrack); return -ENOMEM; } - setup.bRequestType = (__u8)0x40; - setup.bRequest = (__u8)0x0e; - setup.wValue = get_reg_value(reg, dummy); - setup.wIndex = get_reg_index(reg); - setup.wLength = 0; + urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); + if (!urbtrack->setup) { + usb_free_urb(urbtrack->urb); + kfree(urbtrack); + return -ENOMEM; + } + urbtrack->setup->bRequestType = (__u8)0x40; + urbtrack->setup->bRequest = (__u8)0x0e; + urbtrack->setup->wValue = get_reg_value(reg, dummy); + urbtrack->setup->wIndex = 
get_reg_index(reg); + urbtrack->setup->wLength = 0; usb_fill_control_urb(urbtrack->urb, usbdev, usb_sndctrlpipe(usbdev, 0), - (unsigned char *)&setup, + (unsigned char *)urbtrack->setup, NULL, 0, async_complete, urbtrack); kref_init(&urbtrack->ref_count); INIT_LIST_HEAD(&urbtrack->urblist_entry); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index d953d674f222..3bac4693c038 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial) static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { - u16 product = serial->dev->descriptor.idProduct; + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); u8 *buf; int device_type; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 375b5a400b6f..5c9f9b1d7736 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev) char buf[32]; /* try ID specific firmware first, then try generic firmware */ - sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, - dev->descriptor.idProduct); + sprintf(buf, "ti_usb-v%04x-p%04x.fw", + le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); status = request_firmware(&fw_p, buf, &dev->dev); if (status != 0) { buf[0] = '\0'; - if (dev->descriptor.idVendor == MTS_VENDOR_ID) { - switch (dev->descriptor.idProduct) { + if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { + switch (le16_to_cpu(dev->descriptor.idProduct)) { case MTS_CDMA_PRODUCT_ID: strcpy(buf, "mts_cdma.fw"); break; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 8257d30c4072..85365784040b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb) tty_flip_buffer_push(&port->port); } else dev_dbg(dev, "%s: empty read urb received\n", __func__); - - /* Resubmit urb so we continue receiving */ - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) { - if (err != -EPERM) { - dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); - /* busy also in error unless we are killed */ - usb_mark_last_busy(port->serial->dev); - } - } else { + } + /* Resubmit urb so we continue receiving */ + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + if (err != -EPERM) { + dev_err(dev, "%s: resubmit read urb failed. (%d)\n", + __func__, err); + /* busy also in error unless we are killed */ usb_mark_last_busy(port->serial->dev); } + } else { + usb_mark_last_busy(port->serial->dev); } } diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 16968c899493..d3493ca0525d 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) } spin_lock_irqsave(&xfer->lock, flags); rpipe = xfer->ep->hcpriv; + if (rpipe == NULL) { + pr_debug("%s: xfer id 0x%08X has no RPIPE. 
%s", + __func__, wa_xfer_id(xfer), + "Probably already aborted.\n" ); + goto out_unlock; + } /* Check the delayed list -> if there, release and complete */ spin_lock_irqsave(&wa->xfer_list_lock, flags2); if (!list_empty(&xfer->list_node) && xfer->seg == NULL) @@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb) break; } usb_status = xfer_result->bTransferStatus & 0x3f; - if (usb_status == WA_XFER_STATUS_ABORTED - || usb_status == WA_XFER_STATUS_NOT_FOUND) + if (usb_status == WA_XFER_STATUS_NOT_FOUND) /* taken care of already */ break; xfer_id = xfer_result->dwTransferID; diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 3ba37713b1f9..dc09ebe4aba5 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = { } }; -static const struct fb_bitfield def_rgb666[] = { - [RED] = { - .offset = 16, - .length = 6, - }, - [GREEN] = { - .offset = 8, - .length = 6, - }, - [BLUE] = { - .offset = 0, - .length = 6, - }, - [TRANSP] = { /* no support for transparency */ - .length = 0, - } -}; - static const struct fb_bitfield def_rgb888[] = { [RED] = { .offset = 16, @@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var, break; case STMLCDIF_16BIT: case STMLCDIF_18BIT: - /* 24 bit to 18 bit mapping */ - rgb = def_rgb666; - break; case STMLCDIF_24BIT: /* real 24 bit */ rgb = def_rgb888; @@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info) return -EINVAL; case STMLCDIF_16BIT: case STMLCDIF_18BIT: - /* 24 bit to 18 bit mapping */ - ctrl |= CTRL_DF24; /* ignore the upper 2 bits in - * each colour component - */ - break; case STMLCDIF_24BIT: /* real 24 bit */ break; diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c index 5338f362293b..1b60698f141e 100644 --- a/drivers/video/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/omap2/displays-new/connector-analog-tv.c @@ -28,6 +28,20 @@ struct panel_drv_data { bool invert_polarity; }; +static const struct omap_video_timings tvc_pal_timings = { + .x_res = 720, + .y_res = 574, + .pixel_clock = 13500, + .hsw = 64, + .hfp = 12, + .hbp = 68, + .vsw = 5, + .vfp = 5, + .vbp = 41, + + .interlace = true, +}; + #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev) return -ENODEV; } - ddata->timings = omap_dss_pal_timings; + ddata->timings = tvc_pal_timings; dssdev = &ddata->dssdev; dssdev->driver = &tvc_driver; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; - dssdev->panel.timings = omap_dss_pal_timings; + dssdev->panel.timings = tvc_pal_timings; r = omapdss_register_display(dssdev); if (r) { diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index eaf133384a8f..8bc5e8ccb091 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, u64 extent_item_pos, struct extent_inode_elem **eie) { - u64 data_offset; - u64 data_len; + u64 offset = 0; struct extent_inode_elem *e; - data_offset = btrfs_file_extent_offset(eb, fi); - data_len = btrfs_file_extent_num_bytes(eb, fi); + if (!btrfs_file_extent_compression(eb, fi) && + !btrfs_file_extent_encryption(eb, fi) && + !btrfs_file_extent_other_encoding(eb, fi)) { + u64 data_offset; + u64 data_len; - if (extent_item_pos < 
data_offset || - extent_item_pos >= data_offset + data_len) - return 1; + data_offset = btrfs_file_extent_offset(eb, fi); + data_len = btrfs_file_extent_num_bytes(eb, fi); + + if (extent_item_pos < data_offset || + extent_item_pos >= data_offset + data_len) + return 1; + offset = extent_item_pos - data_offset; + } e = kmalloc(sizeof(*e), GFP_NOFS); if (!e) @@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, e->next = *eie; e->inum = key->objectid; - e->offset = key->offset + (extent_item_pos - data_offset); + e->offset = key->offset + offset; *eie = e; return 0; @@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb; struct btrfs_key key; struct btrfs_file_extent_item *fi; - struct extent_inode_elem *eie = NULL; + struct extent_inode_elem *eie = NULL, *old = NULL; u64 disk_byte; if (level != 0) { @@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, if (disk_byte == wanted_disk_byte) { eie = NULL; + old = NULL; if (extent_item_pos) { ret = check_extent_in_eb(&key, eb, fi, *extent_item_pos, @@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, if (ret < 0) break; } - if (!ret) { - ret = ulist_add(parents, eb->start, - (uintptr_t)eie, GFP_NOFS); - if (ret < 0) - break; - if (!extent_item_pos) { - ret = btrfs_next_old_leaf(root, path, - time_seq); - continue; - } + if (ret > 0) + goto next; + ret = ulist_add_merge(parents, eb->start, + (uintptr_t)eie, + (u64 *)&old, GFP_NOFS); + if (ret < 0) + break; + if (!ret && extent_item_pos) { + while (old->next) + old = old->next; + old->next = eie; } } +next: ret = btrfs_next_old_item(root, path, time_seq); } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 5bf4c39e2ad6..ed504607d8ec 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, BUG_ON(!eb_rewin); } - extent_buffer_get(eb_rewin); btrfs_tree_read_unlock(eb); free_extent_buffer(eb); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 583d98bd065e..fe443fece851 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } while (!end) { - u64 offset_in_extent; + u64 offset_in_extent = 0; /* break if the extent we found is outside the range */ if (em->start >= max || extent_map_end(em) < off) @@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, /* * record the offset from the start of the extent - * for adjusting the disk offset below + * for adjusting the disk offset below. Only do this if the + * extent isn't compressed since our in ram offset may be past + * what we have actually allocated on disk. 
*/ - offset_in_extent = em_start - em->start; + if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + offset_in_extent = em_start - em->start; em_end = extent_map_end(em); em_len = em_end - em_start; emflags = em->flags; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a005fe2c072a..8e686a427ce2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, if (no_splits) goto next; - if (em->block_start < EXTENT_MAP_LAST_BYTE && - em->start < start) { + if (em->start < start) { split->start = em->start; split->len = start - em->start; - split->orig_start = em->orig_start; - split->block_start = em->block_start; - if (compressed) - split->block_len = em->block_len; - else - split->block_len = split->len; - split->ram_bytes = em->ram_bytes; - split->orig_block_len = max(split->block_len, - em->orig_block_len); + if (em->block_start < EXTENT_MAP_LAST_BYTE) { + split->orig_start = em->orig_start; + split->block_start = em->block_start; + + if (compressed) + split->block_len = em->block_len; + else + split->block_len = split->len; + split->orig_block_len = max(split->block_len, + em->orig_block_len); + split->ram_bytes = em->ram_bytes; + } else { + split->orig_start = split->start; + split->block_len = 0; + split->block_start = em->block_start; + split->orig_block_len = 0; + split->ram_bytes = split->len; + } + split->generation = gen; split->bdev = em->bdev; split->flags = flags; @@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split = split2; split2 = NULL; } - if (em->block_start < EXTENT_MAP_LAST_BYTE && - testend && em->start + em->len > start + len) { + if (testend && em->start + em->len > start + len) { u64 diff = start + len - em->start; split->start = start + len; @@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->flags = flags; split->compress_type = em->compress_type; split->generation = gen; - split->orig_block_len = max(em->block_len, + + if (em->block_start < EXTENT_MAP_LAST_BYTE) { + split->orig_block_len = max(em->block_len, em->orig_block_len); - split->ram_bytes = em->ram_bytes; - if (compressed) { - split->block_len = em->block_len; - split->block_start = em->block_start; - split->orig_start = em->orig_start; + split->ram_bytes = em->ram_bytes; + if (compressed) { + split->block_len = em->block_len; + split->block_start = em->block_start; + split->orig_start = em->orig_start; + } else { + split->block_len = split->len; + split->block_start = em->block_start + + diff; + split->orig_start = em->orig_start; + } } else { - split->block_len = split->len; - split->block_start = em->block_start + diff; - split->orig_start = em->orig_start; + split->ram_bytes = split->len; + split->orig_start = split->start; + split->block_len = 0; + split->block_start = em->block_start; + split->orig_block_len = 0; } ret = add_extent_mapping(em_tree, split, modified); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6d1b93c8aafb..021694c08181 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) continue; - extent_offset = btrfs_file_extent_offset(leaf, extent); - if (key.offset - extent_offset != offset) + /* + * 'offset' refers to the exact key.offset, + * NOT the 'offset' field in btrfs_extent_data_ref, ie. + * (key.offset - extent_offset). 
+ */ + if (key.offset != offset) continue; + extent_offset = btrfs_file_extent_offset(leaf, extent); num_bytes = btrfs_file_extent_num_bytes(leaf, extent); + if (extent_offset >= old->extent_offset + old->offset + old->len || extent_offset + num_bytes <= old->extent_offset + old->offset) continue; + ret = 0; break; } @@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, backref->root_id = root_id; backref->inum = inum; - backref->file_pos = offset + extent_offset; + backref->file_pos = offset; backref->num_bytes = num_bytes; backref->extent_offset = extent_offset; backref->generation = btrfs_file_extent_generation(leaf, extent); @@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path, new->path = path; list_for_each_entry_safe(old, tmp, &new->head, list) { - ret = iterate_inodes_from_logical(old->bytenr, fs_info, + ret = iterate_inodes_from_logical(old->bytenr + + old->extent_offset, fs_info, path, record_one_backref, old); BUG_ON(ret < 0 && ret != -ENOENT); @@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) int mask = attr->ia_valid; int ret; - if (newsize == oldsize) - return 0; - /* * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a * special case where we need to update the times despite not having @@ -5165,14 +5170,31 @@ next: } /* Reached end of directory/root. Bump pos past the last item. */ - if (key_type == BTRFS_DIR_INDEX_KEY) - /* - * 32-bit glibc will use getdents64, but then strtol - - * so the last number we can serve is this. - */ - ctx->pos = 0x7fffffff; - else - ctx->pos++; + ctx->pos++; + + /* + * Stop new entries from being returned after we return the last + * entry. + * + * New directory entries are assigned a strictly increasing + * offset. This means that new entries created during readdir + * are *guaranteed* to be seen in the future by that readdir. + * This has broken buggy programs which operate on names as + * they're returned by readdir. Until we re-use freed offsets + * we have this hack to stop new entries from being returned + * under the assumption that they'll never reach this huge + * offset. + * + * This is being careful not to overflow 32bit loff_t unless the + * last entry requires it because doing so has broken 32bit apps + * in the past. 
+ */ + if (key_type == BTRFS_DIR_INDEX_KEY) { + if (ctx->pos >= INT_MAX) + ctx->pos = LLONG_MAX; + else + ctx->pos = INT_MAX; + } nopos: ret = 0; err: diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index d58cce77fc6c..af1931a5960d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, * a dirty root struct and adds it into the list of dead roots that need to * be deleted */ -int btrfs_add_dead_root(struct btrfs_root *root) +void btrfs_add_dead_root(struct btrfs_root *root) { spin_lock(&root->fs_info->trans_lock); - list_add_tail(&root->root_list, &root->fs_info->dead_roots); + if (list_empty(&root->root_list)) + list_add_tail(&root->root_list, &root->fs_info->dead_roots); spin_unlock(&root->fs_info->trans_lock); - return 0; } /* @@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root) } root = list_first_entry(&fs_info->dead_roots, struct btrfs_root, root_list); - list_del(&root->root_list); + list_del_init(&root->root_list); spin_unlock(&fs_info->trans_lock); pr_debug("btrfs: cleaner removing %llu\n", diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 005b0375d18c..defbc4269897 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_add_dead_root(struct btrfs_root *root); +void btrfs_add_dead_root(struct btrfs_root *root); int btrfs_defrag_root(struct btrfs_root *root); int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 2c6791493637..ff60d8978ae2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3746,8 +3746,9 @@ next_slot: } log_extents: + btrfs_release_path(path); + btrfs_release_path(dst_path); if (fast_search) { - btrfs_release_path(dst_path); ret = btrfs_log_changed_extents(trans, root, inode, dst_path); if (ret) { err = ret; @@ -3764,8 +3765,6 @@ log_extents: } if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { - btrfs_release_path(path); - btrfs_release_path(dst_path); ret = log_directory_changes(trans, root, inode, path, dst_path); if (ret) { err = ret; diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 45e57cc38200..fc6f4f3a1a9d 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(server->secmech.md5)) { cifs_dbg(VFS, "could not allocate crypto md5\n"); - return PTR_ERR(server->secmech.md5); + rc = PTR_ERR(server->secmech.md5); + server->secmech.md5 = NULL; + return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(server->secmech.md5); server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); if (!server->secmech.sdescmd5) { - rc = -ENOMEM; crypto_free_shash(server->secmech.md5); server->secmech.md5 = NULL; - return rc; + return -ENOMEM; } server->secmech.sdescmd5->shash.tfm = server->secmech.md5; server->secmech.sdescmd5->shash.flags = 0x0; @@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) if (blobptr + attrsize > blobend) break; if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { - if (!attrsize) + if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) 
break; if (!ses->domainName) { ses->domainName = @@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) { + int rc; unsigned int size; /* check if already allocated */ @@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); if (IS_ERR(server->secmech.hmacmd5)) { cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); - return PTR_ERR(server->secmech.hmacmd5); + rc = PTR_ERR(server->secmech.hmacmd5); + server->secmech.hmacmd5 = NULL; + return rc; } size = sizeof(struct shash_desc) + diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 4bdd547dbf6f..85ea98d139fc 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb) goto out_no_root; } + if (cifs_sb_master_tcon(cifs_sb)->nocase) + sb->s_d_op = &cifs_ci_dentry_ops; + else + sb->s_d_op = &cifs_dentry_ops; + sb->s_root = d_make_root(inode); if (!sb->s_root) { rc = -ENOMEM; goto out_no_root; } - /* do that *after* d_make_root() - we want NULL ->d_op for root here */ - if (cifs_sb_master_tcon(cifs_sb)->nocase) - sb->s_d_op = &cifs_ci_dentry_ops; - else - sb->s_d_op = &cifs_dentry_ops; - #ifdef CONFIG_CIFS_NFSD_EXPORT if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifs_dbg(FYI, "export ops supported\n"); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1fdc37041057..52ca861ed35e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -44,6 +44,7 @@ #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) #define MAX_SERVER_SIZE 15 #define MAX_SHARE_SIZE 80 +#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */ #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ @@ -369,6 +370,9 @@ struct smb_version_operations { void (*generate_signingkey)(struct TCP_Server_Info *server); int (*calc_signature)(struct smb_rqst *rqst, struct TCP_Server_Info *server); + int (*query_mf_symlink)(const unsigned char *path, char *pbuf, + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, + unsigned int xid); }; struct smb_version_values { diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f7e584d047e2..b29a012bed33 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work); struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete); void cifs_writedata_release(struct kref *refcount); - +int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, + unsigned int xid); #endif /* _CIFSPROTO_H */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa68813396b5..d67c550c4980 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, if (string == NULL) goto out_nomem; - if (strnlen(string, 256) == 256) { + if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN) + == CIFS_MAX_DOMAINNAME_LEN) { printk(KERN_WARNING "CIFS: domain name too" " long\n"); goto cifs_parse_mount_err; @@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses) #ifdef CONFIG_KEYS -/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ -#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) +/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ +#define 
CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) /* Populate username and pw fields from keyring if possible */ static int diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1e57f36ea1b2..7e36ae34e947 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) oflags, &oplock, &cfile->fid.netfid, xid); if (rc == 0) { cifs_dbg(FYI, "posix reopen succeeded\n"); + oparms.reconnect = true; goto reopen_success; } /* diff --git a/fs/cifs/link.c b/fs/cifs/link.c index b83c3f5646bd..562044f700e5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr) } int -CIFSCheckMFSymlink(struct cifs_fattr *fattr, - const unsigned char *path, - struct cifs_sb_info *cifs_sb, unsigned int xid) +open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, + unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, + unsigned int xid) { int rc; int oplock = 0; __u16 netfid = 0; struct tcon_link *tlink; - struct cifs_tcon *pTcon; + struct cifs_tcon *ptcon; struct cifs_io_parms io_parms; - u8 *buf; - char *pbuf; - unsigned int bytes_read = 0; int buf_type = CIFS_NO_BUFFER; - unsigned int link_len = 0; FILE_ALL_INFO file_info; - if (!CIFSCouldBeMFSymlink(fattr)) - /* it's not a symlink */ - return 0; - tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); - pTcon = tlink_tcon(tlink); + ptcon = tlink_tcon(tlink); - rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, + rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ, CREATE_NOT_DIR, &netfid, &oplock, &file_info, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - if (rc != 0) - goto out; + if (rc != 0) { + cifs_put_tlink(tlink); + return rc; + } if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { - CIFSSMBClose(xid, pTcon, netfid); + CIFSSMBClose(xid, ptcon, netfid); + cifs_put_tlink(tlink); /* it's not a symlink */ - goto out; + return rc; } - buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); - if (!buf) { - rc = -ENOMEM; - goto out; - } - pbuf = buf; io_parms.netfid = netfid; io_parms.pid = current->tgid; - io_parms.tcon = pTcon; + io_parms.tcon = ptcon; io_parms.offset = 0; io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; - rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); - CIFSSMBClose(xid, pTcon, netfid); - if (rc != 0) { - kfree(buf); + rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type); + CIFSSMBClose(xid, ptcon, netfid); + cifs_put_tlink(tlink); + return rc; +} + + +int +CIFSCheckMFSymlink(struct cifs_fattr *fattr, + const unsigned char *path, + struct cifs_sb_info *cifs_sb, unsigned int xid) +{ + int rc = 0; + u8 *buf = NULL; + unsigned int link_len = 0; + unsigned int bytes_read = 0; + struct cifs_tcon *ptcon; + + if (!CIFSCouldBeMFSymlink(fattr)) + /* it's not a symlink */ + return 0; + + buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; goto out; } + ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); + if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) + rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, + &bytes_read, cifs_sb, xid); + else + goto out; + + if (rc != 0) + goto out; + + if (bytes_read == 0) /* not a symlink */ + goto out; + rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); - kfree(buf); if (rc == -EINVAL) { /* it's not a symlink */ rc = 0; @@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, fattr->cf_mode |= S_IFLNK | 
S_IRWXU | S_IRWXG | S_IRWXO; fattr->cf_dtype = DT_LNK; out: - cifs_put_tlink(tlink); + kfree(buf); return rc; } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index ab8778469394..69d2c826a23b 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, return; } + /* + * If we know that the inode will need to be revalidated immediately, + * then don't create a new dentry for it. We'll end up doing an on + * the wire call either way and this spares us an invalidation. + */ + if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) + return; + dentry = d_alloc(parent, name); if (!dentry) return; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 79358e341fd2..08dd37bb23aa 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, bytes_ret = 0; } else bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, - 256, nls_cp); + CIFS_MAX_DOMAINNAME_LEN, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null terminator */ @@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, /* copy domain */ if (ses->domainName != NULL) { - strncpy(bcc_ptr, ses->domainName, 256); - bcc_ptr += strnlen(ses->domainName, 256); + strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); + bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); } /* else we will send a null domain name so the server will default to its own domain */ *bcc_ptr = 0; diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 6457690731a2..60943978aec3 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = { .mand_lock = cifs_mand_lock, .mand_unlock_range = cifs_unlock_range, .push_mand_locks = cifs_push_mandatory_locks, + .query_mf_symlink = open_query_close_cifs_symlink, }; struct smb_version_values smb1_values = { diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 301b191270b9..4f2300d020c7 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -42,6 +42,7 @@ static int smb2_crypto_shash_allocate(struct TCP_Server_Info *server) { + int rc; unsigned int size; if (server->secmech.sdeschmacsha256 != NULL) @@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server) server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); if (IS_ERR(server->secmech.hmacsha256)) { cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); - return PTR_ERR(server->secmech.hmacsha256); + rc = PTR_ERR(server->secmech.hmacsha256); + server->secmech.hmacsha256 = NULL; + return rc; } size = sizeof(struct shash_desc) + @@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) server->secmech.sdeschmacsha256 = NULL; crypto_free_shash(server->secmech.hmacsha256); server->secmech.hmacsha256 = NULL; - return PTR_ERR(server->secmech.cmacaes); + rc = PTR_ERR(server->secmech.cmacaes); + server->secmech.cmacaes = NULL; + return rc; } size = sizeof(struct shash_desc) + diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 4888cb3fdef7..c7c83ff0f752 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove); */ void debugfs_remove_recursive(struct dentry *dentry) { - struct dentry *child; - struct dentry *parent; + struct dentry *child, *next, *parent; if (IS_ERR_OR_NULL(dentry)) return; @@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry 
*dentry) return; parent = dentry; + down: mutex_lock(&parent->d_inode->i_mutex); + list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { + if (!debugfs_positive(child)) + continue; - while (1) { - /* - * When all dentries under "parent" has been removed, - * walk up the tree until we reach our starting point. - */ - if (list_empty(&parent->d_subdirs)) { - mutex_unlock(&parent->d_inode->i_mutex); - if (parent == dentry) - break; - parent = parent->d_parent; - mutex_lock(&parent->d_inode->i_mutex); - } - child = list_entry(parent->d_subdirs.next, struct dentry, - d_u.d_child); - next_sibling: - - /* - * If "child" isn't empty, walk down the tree and - * remove all its descendants first. - */ + /* perhaps simple_empty(child) makes more sense */ if (!list_empty(&child->d_subdirs)) { mutex_unlock(&parent->d_inode->i_mutex); parent = child; - mutex_lock(&parent->d_inode->i_mutex); - continue; + goto down; } - __debugfs_remove(child, parent); - if (parent->d_subdirs.next == &child->d_u.d_child) { - /* - * Try the next sibling. - */ - if (child->d_u.d_child.next != &parent->d_subdirs) { - child = list_entry(child->d_u.d_child.next, - struct dentry, - d_u.d_child); - goto next_sibling; - } - - /* - * Avoid infinite loop if we fail to remove - * one dentry. - */ - mutex_unlock(&parent->d_inode->i_mutex); - break; - } - simple_release_fs(&debugfs_mount, &debugfs_mount_count); + up: + if (!__debugfs_remove(child, parent)) + simple_release_fs(&debugfs_mount, &debugfs_mount_count); } - parent = dentry->d_parent; + mutex_unlock(&parent->d_inode->i_mutex); + child = parent; + parent = parent->d_parent; mutex_lock(&parent->d_inode->i_mutex); - __debugfs_remove(dentry, parent); + + if (child != dentry) { + next = list_entry(child->d_u.d_child.next, struct dentry, + d_u.d_child); + goto up; + } + + if (!__debugfs_remove(child, parent)) + simple_release_fs(&debugfs_mount, &debugfs_mount_count); mutex_unlock(&parent->d_inode->i_mutex); - simple_release_fs(&debugfs_mount, &debugfs_mount_count); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 911649a47dd5..812149119fa3 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -686,7 +686,6 @@ static int device_close(struct inode *inode, struct file *file) device_remove_lockspace() */ sigprocmask(SIG_SETMASK, &tmpsig, NULL); - recalc_sigpending(); return 0; } diff --git a/fs/exec.c b/fs/exec.c index 9c73def87642..fd774c7cb483 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) return -ENOMEM; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, old_start, old_end); if (new_end > old_start) { /* * when the old and new regions overlap clear from new_end. @@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) free_pgd_range(&tlb, old_start, old_end, new_end, vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); } - tlb_finish_mmu(&tlb, new_end, old_end); + tlb_finish_mmu(&tlb, old_start, old_end); /* * Shrink the vma to just the new range. Always succeeds. 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a61873808f76..72ba4705d4fa 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4412,7 +4412,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode) retry: err = ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); - if (err == ENOMEM) { + if (err == -ENOMEM) { cond_resched(); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f03598c6ffd3..8bf5999875ee 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -734,11 +734,8 @@ repeat_in_this_group: ino = ext4_find_next_zero_bit((unsigned long *) inode_bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino); - if (ino >= EXT4_INODES_PER_GROUP(sb)) { - if (++group == ngroups) - group = 0; - continue; - } + if (ino >= EXT4_INODES_PER_GROUP(sb)) + goto next_group; if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { ext4_error(sb, "reserved inode found cleared - " "inode=%lu", ino + 1); @@ -769,6 +766,9 @@ repeat_in_this_group: goto got; /* we grabbed the inode! */ if (ino < EXT4_INODES_PER_GROUP(sb)) goto repeat_in_this_group; +next_group: + if (++group == ngroups) + group = 0; } err = -ENOSPC; goto out; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ba33c67d6e48..dd32a2eacd0d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -555,14 +555,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, int ret; unsigned long long status; -#ifdef ES_AGGRESSIVE_TEST - if (retval != map->m_len) { - printk("ES len assertion failed for inode: %lu " - "retval %d != map->m_len %d " - "in %s (lookup)\n", inode->i_ino, retval, - map->m_len, __func__); + if (unlikely(retval != map->m_len)) { + ext4_warning(inode->i_sb, + "ES len assertion failed for inode " + "%lu: retval %d != map->m_len %d", + inode->i_ino, retval, map->m_len); + WARN_ON(1); } -#endif status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; @@ -656,14 +655,13 @@ found: int ret; unsigned long long status; -#ifdef ES_AGGRESSIVE_TEST - if (retval != map->m_len) { - printk("ES len assertion failed for inode: %lu " - "retval %d != map->m_len %d " - "in %s (allocation)\n", inode->i_ino, retval, - map->m_len, __func__); + if (unlikely(retval != map->m_len)) { + ext4_warning(inode->i_sb, + "ES len assertion failed for inode " + "%lu: retval %d != map->m_len %d", + inode->i_ino, retval, map->m_len); + WARN_ON(1); } -#endif /* * If the extent has been zeroed out, we don't need to update @@ -1637,14 +1635,13 @@ add_delayed: int ret; unsigned long long status; -#ifdef ES_AGGRESSIVE_TEST - if (retval != map->m_len) { - printk("ES len assertion failed for inode: %lu " - "retval %d != map->m_len %d " - "in %s (lookup)\n", inode->i_ino, retval, - map->m_len, __func__); + if (unlikely(retval != map->m_len)) { + ext4_warning(inode->i_sb, + "ES len assertion failed for inode " + "%lu: retval %d != map->m_len %d", + inode->i_ino, retval, map->m_len); + WARN_ON(1); } -#endif status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 9491ac0590f7..c0427e2f6648 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); - memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree)); - memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr)); + ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); + ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); + ext4_es_lru_del(inode1); + ext4_es_lru_del(inode2); isize = i_size_read(inode1); i_size_write(inode1, i_size_read(inode2)); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index bca26f34edf4..b59373b625e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1359,7 +1359,7 @@ static const struct mount_opts { {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, - MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, + MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | @@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " - "both data=journal and delalloc"); + "both data=journal and dioread_nolock"); goto failed_mount; } if (test_opt(sb, DELALLOC)) @@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { + if (test_opt2(sb, EXPLICIT_DELALLOC)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and delalloc"); + err = -EINVAL; + goto restore_opts; + } + if (test_opt(sb, DIOREAD_NOLOCK)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dioread_nolock"); + err = -EINVAL; + goto restore_opts; + } + } + if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); @@ -5481,6 +5496,7 @@ static void __exit ext4_exit_fs(void) kset_unregister(ext4_kset); ext4_exit_system_zone(); ext4_exit_pageio(); + ext4_exit_es(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); diff --git a/fs/fcntl.c b/fs/fcntl.c index 6599222536eb..65343c3741ff 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -730,14 +730,14 @@ static int __init fcntl_init(void) * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. 
*/ - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ __O_SYNC | O_DSYNC | FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | - __FMODE_EXEC | O_PATH + __FMODE_EXEC | O_PATH | __O_TMPFILE )); fasync_cache = kmem_cache_create("fasync_cache", diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a3f868ae3fd4..34423978b170 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, return inode; } +/* + * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never + * be taken from reclaim -- unlike regular filesystems. This needs an + * annotation because huge_pmd_share() does an allocation under + * i_mmap_mutex. + */ +struct lock_class_key hugetlbfs_i_mmap_mutex_key; + static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev) @@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); + lockdep_set_class(&inode->i_mapping->i_mmap_mutex, + &hugetlbfs_i_mmap_mutex_key); inode->i_mapping->a_ops = &hugetlbfs_aops; inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 01bfe7662751..41e491b8e5d7 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) nlm_init->protocol, nlm_version, nlm_init->hostname, nlm_init->noresvport, nlm_init->net); - if (host == NULL) { - lockd_down(nlm_init->net); - return ERR_PTR(-ENOLCK); - } + if (host == NULL) + goto out_nohost; + if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL) + goto out_nobind; return host; +out_nobind: + nlmclnt_release_host(host); +out_nohost: + lockd_down(nlm_init->net); + return ERR_PTR(-ENOLCK); } EXPORT_SYMBOL_GPL(nlmclnt_init); diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 9760ecb9b60f..acd394716349 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_args *argp = &req->a_args; struct nlm_lock *lock = &argp->lock; + char *nodename = req->a_host->h_rpcclnt->cl_nodename; nlmclnt_next_cookie(&argp->cookie); memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); - lock->caller = utsname()->nodename; + lock->caller = nodename; lock->oh.data = req->a_owner; lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", (unsigned int)fl->fl_u.nfs_fl.owner->pid, - utsname()->nodename); + nodename); lock->svid = fl->fl_u.nfs_fl.owner->pid; lock->fl.fl_start = fl->fl_start; lock->fl.fl_end = fl->fl_end; diff --git a/fs/namei.c b/fs/namei.c index 8b61d103a8a7..89a612e392eb 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3671,15 +3671,11 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* - * To use null names we require CAP_DAC_READ_SEARCH - * This ensures that not everyone will be able to create - * handlink using the passed filedescriptor. 
+ * Using empty names is equivalent to using AT_SYMLINK_FOLLOW + * on /proc/self/fd/<fd>. */ - if (flags & AT_EMPTY_PATH) { - if (!capable(CAP_DAC_READ_SEARCH)) - return -ENOENT; + if (flags & AT_EMPTY_PATH) how = LOOKUP_EMPTY; - } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index af6e806044d7..941246f2b43d 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -463,7 +463,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st unlock_new_inode(inode); } else nfs_refresh_inode(inode, fattr); - nfs_setsecurity(inode, fattr, label); dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n", inode->i_sb->s_id, (long long)NFS_FILEID(inode), @@ -963,9 +962,15 @@ EXPORT_SYMBOL_GPL(nfs_revalidate_inode); static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) { struct nfs_inode *nfsi = NFS_I(inode); - + int ret; + if (mapping->nrpages != 0) { - int ret = invalidate_inode_pages2(mapping); + if (S_ISREG(inode->i_mode)) { + ret = nfs_sync_mapping(mapping); + if (ret < 0) + return ret; + } + ret = invalidate_inode_pages2(mapping); if (ret < 0) return ret; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index cf11799297c4..108a774095f7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3071,15 +3071,13 @@ struct rpc_clnt * nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { + struct rpc_clnt *client = NFS_CLIENT(dir); int status; - struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); - if (status < 0) { - rpc_shutdown_client(client); + if (status < 0) return ERR_PTR(status); - } - return client; + return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; } static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 71fdc0dfa0d2..f6db66d8f647 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2478,6 +2478,10 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, if (server->flags & NFS_MOUNT_NOAC) sb_mntdata.mntflags |= MS_SYNCHRONOUS; + if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL) + if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS) + sb_mntdata.mntflags |= MS_SYNCHRONOUS; + /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata); if (IS_ERR(s)) { diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0d4c410e4589..419572f33b72 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1524,7 +1524,7 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) { return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\ - 1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\ + 1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\ 2 + /*eir_server_owner.so_minor_id */\ /* eir_server_owner.so_major_id<> */\ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\ diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 280acef6f0dc..43f42290e5df 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1264,6 +1264,8 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) struct svc_cred *cr = &rqstp->rq_cred; u32 service; + if (!cr->cr_gss_mech) + return false; service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); return service == RPC_GSS_SVC_INTEGRITY || service == RPC_GSS_SVC_PRIVACY; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 0c0f3ea90de5..c2a4701d7286 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3360,7 +3360,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr, 8 /* eir_clientid */ + 4 /* eir_sequenceid */ + 4 /* eir_flags */ + - 4 /* spr_how (SP4_NONE) */ + + 4 /* spr_how */ + + 8 /* spo_must_enforce, spo_must_allow */ + 8 /* so_minor_id */ + 4 /* so_major_id.len */ + (XDR_QUADLEN(major_id_sz) * 4) + @@ -3372,8 +3373,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr, WRITE32(exid->seqid); WRITE32(exid->flags); - /* state_protect4_r. 
Currently only support SP4_NONE */ - BUG_ON(exid->spa_how != SP4_NONE); WRITE32(exid->spa_how); switch (exid->spa_how) { case SP4_NONE: diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 79736a28d84f..2abf97b2a592 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1757,7 +1757,7 @@ try_again: goto out; } else if (ret == 1) { clusters_need = wc->w_clen; - ret = ocfs2_refcount_cow(inode, filp, di_bh, + ret = ocfs2_refcount_cow(inode, di_bh, wc->w_cpos, wc->w_clen, UINT_MAX); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index eb760d8acd50..30544ce8e9f7 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode) { int ret; struct ocfs2_empty_dir_priv priv = { - .ctx.actor = ocfs2_empty_dir_filldir + .ctx.actor = ocfs2_empty_dir_filldir, }; - memset(&priv, 0, sizeof(priv)); - if (ocfs2_dir_indexed(inode)) { ret = ocfs2_empty_dir_dx(inode, &priv); if (ret) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41000f223ca4..3261d71319ee 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode, if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) goto out; - return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1); + return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); out: return status; @@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode, zero_clusters = last_cpos - zero_cpos; if (needs_cow) { - rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos, + rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters, UINT_MAX); if (rc) { mlog_errno(rc); @@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode, *meta_level = 1; - ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX); + ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); if (ret) mlog_errno(ret); out: diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 96f9ac237e86..0a992737dcaf 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + - ocfs2_quota_trans_credits(sb) + bits_wanted; + ocfs2_quota_trans_credits(sb); } static inline int ocfs2_calc_symlink_credits(struct super_block *sb) diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f1fc172175b6..452068b45749 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle, u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); - ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, + ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos, p_cpos, new_p_cpos, len); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9f6b96a09615..a70d604593b6 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -49,7 +49,6 @@ struct ocfs2_cow_context { struct inode *inode; - struct file *file; u32 cow_start; u32 cow_len; struct ocfs2_extent_tree data_et; @@ -66,7 +65,7 @@ struct ocfs2_cow_context { u32 *num_clusters, unsigned int *extent_flags); int (*cow_duplicate_clusters)(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); }; @@ -2922,14 +2921,12 @@ static 
int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) } int ocfs2_duplicate_clusters_by_page(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len) { int ret = 0, partial; - struct inode *inode = file_inode(file); - struct ocfs2_caching_info *ci = INODE_CACHE(inode); - struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct super_block *sb = inode->i_sb; u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); struct page *page; pgoff_t page_index; @@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) BUG_ON(PageDirty(page)); - if (PageReadahead(page)) { - page_cache_async_readahead(mapping, - &file->f_ra, file, - page, page_index, - readahead_pages); - } - if (!PageUptodate(page)) { ret = block_read_full_page(page, ocfs2_get_block); if (ret) { @@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, } } - ocfs2_map_and_dirty_page(inode, handle, from, to, + ocfs2_map_and_dirty_page(inode, + handle, from, to, page, 0, &new_block); mark_page_accessed(page); unlock: @@ -3020,12 +3011,11 @@ unlock: } int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len) { int ret = 0; - struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; struct ocfs2_caching_info *ci = INODE_CACHE(inode); int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); @@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle, /*If the old clusters is unwritten, no need to duplicate. */ if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { - ret = context->cow_duplicate_clusters(handle, context->file, + ret = context->cow_duplicate_clusters(handle, context->inode, cpos, old, new, len); if (ret) { mlog_errno(ret); @@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context) return ret; } -static void ocfs2_readahead_for_cow(struct inode *inode, - struct file *file, - u32 start, u32 len) -{ - struct address_space *mapping; - pgoff_t index; - unsigned long num_pages; - int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; - - if (!file) - return; - - mapping = file->f_mapping; - num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT; - if (!num_pages) - num_pages = 1; - - index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT; - page_cache_sync_readahead(mapping, &file->f_ra, file, - index, num_pages); -} - /* * Starting at cpos, try to CoW write_len clusters. Don't CoW * past max_cpos. This will stop when it runs into a hole or an * unrefcounted extent. */ static int ocfs2_refcount_cow_hunk(struct inode *inode, - struct file *file, struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos) { @@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, BUG_ON(cow_len == 0); - ocfs2_readahead_for_cow(inode, file, cow_start, cow_len); - context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); if (!context) { ret = -ENOMEM; @@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, context->ref_root_bh = ref_root_bh; context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; context->get_clusters = ocfs2_di_get_clusters; - context->file = file; ocfs2_init_dinode_extent_tree(&context->data_et, INODE_CACHE(inode), di_bh); @@ -3537,7 +3501,6 @@ out: * clusters between cpos and cpos+write_len are safe to modify. 
*/ int ocfs2_refcount_cow(struct inode *inode, - struct file *file, struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos) { @@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode, num_clusters = write_len; if (ext_flags & OCFS2_EXT_REFCOUNTED) { - ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos, + ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, num_clusters, max_cpos); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index 7754608c83a4..6422bbcdb525 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h @@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode, int *credits, int *ref_blocks); int ocfs2_refcount_cow(struct inode *inode, - struct file *filep, struct buffer_head *di_bh, + struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos); typedef int (ocfs2_post_refcount_func)(struct inode *inode, @@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode, u32 cpos, u32 write_len, struct ocfs2_post_refcount *post); int ocfs2_duplicate_clusters_by_page(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, - struct file *file, + struct inode *inode, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); int ocfs2_cow_sync_writeback(struct super_block *sb, diff --git a/fs/open.c b/fs/open.c index d53e29895082..7931f76acc2b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -823,7 +823,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o int lookup_flags = 0; int acc_mode; - if (flags & O_CREAT) + if (flags & (O_CREAT | __O_TMPFILE)) op->mode = (mode & S_IALLUGO) | S_IFREG; else op->mode = 0; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dbf61f6174f0..107d026f5d6e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, * of how soft-dirty works. 
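The fs/open.c hunk above makes build_open_flags() treat __O_TMPFILE like O_CREAT, so the mode argument passed to open() is applied to the anonymous file instead of being dropped. A minimal user-space sketch of the call this affects (path and mode are arbitrary; assumes a libc that already exposes O_TMPFILE, which is new in this release):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Unnamed file in /tmp; with the fix the 0600 mode is honored. */
            int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);
            if (fd < 0) {
                    perror("open(O_TMPFILE)");
                    return 1;
            }
            if (write(fd, "scratch data\n", 13) != 13)
                    perror("write");
            close(fd);
            return 0;
    }
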
*/ pte_t ptent = *pte; - ptent = pte_wrprotect(ptent); - ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); + + if (pte_present(ptent)) { + ptent = pte_wrprotect(ptent); + ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); + } else if (is_swap_pte(ptent)) { + ptent = pte_swp_clear_soft_dirty(ptent); + } else if (pte_file(ptent)) { + ptent = pte_file_clear_soft_dirty(ptent); + } + set_pte_at(vma->vm_mm, addr, pte, ptent); #endif } @@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; - if (!pte_present(ptent)) - continue; if (cp->type == CLEAR_REFS_SOFT_DIRTY) { clear_soft_dirty(vma, addr, pte); continue; } + if (!pte_present(ptent)) + continue; + page = vm_normal_page(vma, addr, ptent); if (!page) continue; @@ -859,7 +868,7 @@ typedef struct { } pagemap_entry_t; struct pagemapread { - int pos, len; + int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ pagemap_entry_t *buffer; bool v2; }; @@ -867,7 +876,7 @@ struct pagemapread { #define PAGEMAP_WALK_SIZE (PMD_SIZE) #define PAGEMAP_WALK_MASK (PMD_MASK) -#define PM_ENTRY_BYTES sizeof(u64) +#define PM_ENTRY_BYTES sizeof(pagemap_entry_t) #define PM_STATUS_BITS 3 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) @@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, flags = PM_PRESENT; page = vm_normal_page(vma, addr, pte); } else if (is_swap_pte(pte)) { - swp_entry_t entry = pte_to_swp_entry(pte); - + swp_entry_t entry; + if (pte_swp_soft_dirty(pte)) + flags2 |= __PM_SOFT_DIRTY; + entry = pte_to_swp_entry(pte); frame = swp_type(entry) | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); flags = PM_SWAP; @@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, goto out_task; pm.v2 = soft_dirty_cleared; - pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); - pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); + pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); + pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); ret = -ENOMEM; if (!pm.buffer) goto out_task; diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 33532f79b4f7..a958444a75fc 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -19,12 +19,13 @@ /* * LOCKING: * - * We rely on new Alexander Viro's super-block locking. + * These guys are evicted from procfs as the very first step in ->kill_sb(). 
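The fs/proc/task_mmu.c hunks above extend soft-dirty tracking to swapped-out and file PTEs and make pm.len count pagemap entries rather than bytes. The bit is still consumed through the existing /proc interface; a rough user-space sketch, assuming CONFIG_MEM_SOFT_DIRTY (bit numbers per Documentation/vm/pagemap.txt: bit 55 soft-dirty, bit 62 swapped, bit 63 present):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Each pagemap entry is 8 bytes (PM_ENTRY_BYTES). */
    static uint64_t pagemap_entry(void *addr)
    {
            uint64_t ent = 0;
            long pagesize = sysconf(_SC_PAGESIZE);
            off_t off = (uintptr_t)addr / pagesize * 8;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd >= 0) {
                    if (pread(fd, &ent, sizeof(ent), off) != sizeof(ent))
                            ent = 0;
                    close(fd);
            }
            return ent;
    }

    int main(void)
    {
            static char page[1 << 16];

            page[0] = 1;    /* touch the page so it becomes soft-dirty */
            uint64_t ent = pagemap_entry(page);
            printf("present=%d swapped=%d soft-dirty=%d\n",
                   (int)(ent >> 63 & 1), (int)(ent >> 62 & 1), (int)(ent >> 55 & 1));

            /* Writing "4" to clear_refs resets the soft-dirty bits. */
            int fd = open("/proc/self/clear_refs", O_WRONLY);
            if (fd >= 0) {
                    if (write(fd, "4", 1) != 1)
                            perror("clear_refs");
                    close(fd);
            }
            return 0;
    }
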
* */ -static int show_version(struct seq_file *m, struct super_block *sb) +static int show_version(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; char *format; if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) { @@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb) #define DJP( x ) le32_to_cpu( jp -> x ) #define JF( x ) ( r -> s_journal -> x ) -static int show_super(struct seq_file *m, struct super_block *sb) +static int show_super(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); seq_printf(m, "state: \t%s\n" @@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb) return 0; } -static int show_per_level(struct seq_file *m, struct super_block *sb) +static int show_per_level(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); int level; @@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb) return 0; } -static int show_bitmap(struct seq_file *m, struct super_block *sb) +static int show_bitmap(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); seq_printf(m, "free_block: %lu\n" @@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb) return 0; } -static int show_on_disk_super(struct seq_file *m, struct super_block *sb) +static int show_on_disk_super(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); struct reiserfs_super_block *rs = sb_info->s_rs; int hash_code = DFL(s_hash_function_code); @@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb) return 0; } -static int show_oidmap(struct seq_file *m, struct super_block *sb) +static int show_oidmap(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); struct reiserfs_super_block *rs = sb_info->s_rs; unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize); @@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb) return 0; } -static int show_journal(struct seq_file *m, struct super_block *sb) +static int show_journal(struct seq_file *m, void *unused) { + struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); struct reiserfs_super_block *rs = r->s_rs; struct journal_params *jp = &rs->s_v1.s_journal; @@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb) return 0; } -/* iterator */ -static int test_sb(struct super_block *sb, void *data) -{ - return data == sb; -} - -static int set_sb(struct super_block *sb, void *data) -{ - return -ENOENT; -} - -struct reiserfs_seq_private { - struct super_block *sb; - int (*show) (struct seq_file *, struct super_block *); -}; - -static void *r_start(struct seq_file *m, loff_t * pos) -{ - struct reiserfs_seq_private *priv = m->private; - loff_t l = *pos; - - if (l) - return NULL; - - if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb))) - return NULL; - - up_write(&priv->sb->s_umount); - return priv->sb; -} - -static void *r_next(struct seq_file *m, void *v, loff_t * pos) -{ - ++*pos; - if (v) - deactivate_super(v); - return NULL; -} - -static void r_stop(struct seq_file *m, void *v) -{ - if (v) - deactivate_super(v); -} - -static int r_show(struct seq_file *m, 
void *v) -{ - struct reiserfs_seq_private *priv = m->private; - return priv->show(m, v); -} - -static const struct seq_operations r_ops = { - .start = r_start, - .next = r_next, - .stop = r_stop, - .show = r_show, -}; - static int r_open(struct inode *inode, struct file *file) { - struct reiserfs_seq_private *priv; - int ret = seq_open_private(file, &r_ops, - sizeof(struct reiserfs_seq_private)); - - if (!ret) { - struct seq_file *m = file->private_data; - priv = m->private; - priv->sb = proc_get_parent_data(inode); - priv->show = PDE_DATA(inode); - } - return ret; + return single_open(file, PDE_DATA(inode), + proc_get_parent_data(inode)); } static const struct file_operations r_file_operations = { .open = r_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, - .owner = THIS_MODULE, + .release = single_release, }; static struct proc_dir_entry *proc_info_root = NULL; static const char proc_info_root_name[] = "fs/reiserfs"; static void add_file(struct super_block *sb, char *name, - int (*func) (struct seq_file *, struct super_block *)) + int (*func) (struct seq_file *, void *)) { proc_create_data(name, 0, REISERFS_SB(sb)->procdir, &r_file_operations, func); diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index f8a23c3078f8..e2e202a07b31 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -499,6 +499,7 @@ int remove_save_link(struct inode *inode, int truncate) static void reiserfs_kill_sb(struct super_block *s) { if (REISERFS_SB(s)) { + reiserfs_proc_info_done(s); /* * Force any pending inode evictions to occur now. Any * inodes to be removed that have extended attributes @@ -554,8 +555,6 @@ static void reiserfs_put_super(struct super_block *s) REISERFS_SB(s)->reserved_blocks); } - reiserfs_proc_info_done(s); - reiserfs_write_unlock(s); mutex_destroy(&REISERFS_SB(s)->lock); kfree(s->s_fs_info); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 56e6b68c8d2f..94383a70c1a3 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -274,15 +274,12 @@ struct acpi_device_wakeup { }; struct acpi_device_physical_node { - u8 node_id; + unsigned int node_id; struct list_head node; struct device *dev; bool put_online:1; }; -/* set maximum of physical nodes to 32 for expansibility */ -#define ACPI_MAX_PHYSICAL_NODE 32 - /* Device */ struct acpi_device { int device_type; @@ -302,10 +299,9 @@ struct acpi_device { struct acpi_driver *driver; void *driver_data; struct device dev; - u8 physical_node_count; + unsigned int physical_node_count; struct list_head physical_node_list; struct mutex physical_node_lock; - DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE); struct list_head power_dependent; void (*remove)(struct acpi_device *); }; @@ -445,7 +441,11 @@ struct acpi_pci_root { }; /* helper */ -acpi_handle acpi_get_child(acpi_handle, u64); +acpi_handle acpi_find_child(acpi_handle, u64, bool); +static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr) +{ + return acpi_find_child(handle, addr, false); +} int acpi_is_root_bridge(acpi_handle); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 2f47ade1b567..0807ddf97b05 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) { return pmd; } + +static inline pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte; +} + 
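The fs/reiserfs/procfs.c hunks above drop the custom seq_operations iterator, which pinned the superblock with sget() on every read, in favor of plain single_open(): the show callback comes from PDE_DATA() and the superblock from the parent proc entry, and since reiserfs_proc_info_done() now runs first in ->kill_sb(), the entries cannot outlive the superblock they show. A sketch of the resulting idiom for a hypothetical foo entry (illustrative only, not the upstream code):

    /* Show callback: the super_block arrives via m->private. */
    static int foo_show(struct seq_file *m, void *unused)
    {
            struct super_block *sb = m->private;

            seq_printf(m, "blocksize: %lu\n", sb->s_blocksize);
            return 0;
    }

    /* Open: PDE_DATA() carries the show function, the parent proc
     * directory carries the super_block, as in the hunks above. */
    static int foo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, PDE_DATA(inode),
                               proc_get_parent_data(inode));
    }
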
+static inline int pte_swp_soft_dirty(pte_t pte) +{ + return 0; +} + +static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_file_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_file_mksoft_dirty(pte_t pte) +{ + return pte; +} + +static inline int pte_file_soft_dirty(pte_t pte) +{ + return 0; +} #endif #ifndef __HAVE_PFNMAP_TRACKING diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 13821c339a41..5672d7ea1fa0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -112,7 +112,7 @@ struct mmu_gather { #define HAVE_GENERIC_MMU_GATHER -void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end); diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4372658c73ae..120d57a1c3a5 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -78,6 +78,11 @@ struct trace_iterator { /* trace_seq for __print_flags() and __print_symbolic() etc. */ struct trace_seq tmp_seq; + cpumask_var_t started; + + /* it's true when current open file is snapshot */ + bool snapshot; + /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; @@ -90,10 +95,7 @@ struct trace_iterator { loff_t pos; long idx; - cpumask_var_t started; - - /* it's true when current open file is snapshot */ - bool snapshot; + /* All new field here will be zeroed out in pipe_read */ }; enum trace_iter_flags { @@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type); extern int trace_add_event_call(struct ftrace_event_call *call); -extern void trace_remove_event_call(struct ftrace_event_call *call); +extern int trace_remove_event_call(struct ftrace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 3869c525b052..369cf2cd5144 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -8,6 +8,7 @@ */ #include <linux/irq.h> #include <linux/module.h> +#include <linux/atomic.h> #ifndef _IIO_TRIGGER_H_ #define _IIO_TRIGGER_H_ @@ -61,7 +62,7 @@ struct iio_trigger { struct list_head list; struct list_head alloc_list; - int use_count; + atomic_t use_count; struct irq_chip subirq_chip; int subirq_base; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3bef14c6586b..482ad2d84a32 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); static inline void tracing_start(void) { } static inline void tracing_stop(void) { } static inline void ftrace_off_permanent(void) { } -static inline void trace_dump_stack(void) { } +static inline void trace_dump_stack(int skip) { } static inline void tracing_on(void) { } static inline void tracing_off(void) { } diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 8d73fe29796a..db1791bb997a 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -113,11 +113,27 @@ #define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3) #define CNTRLREG_TSCENB BIT(7) +/* FIFO READ Register */ 
+#define FIFOREAD_DATA_MASK (0xfff << 0) +#define FIFOREAD_CHNLID_MASK (0xf << 16) + +/* Sequencer Status */ +#define SEQ_STATUS BIT(5) + #define ADC_CLK 3000000 #define MAX_CLK_DIV 7 #define TOTAL_STEPS 16 #define TOTAL_CHANNELS 8 +/* +* ADC runs at 3MHz, and it takes +* 15 cycles to latch one data output. +* Hence the idle time for ADC to +* process one sample data would be +* around 5 micro seconds. +*/ +#define IDLE_TIMEOUT 5 /* microsec */ + #define TSCADC_CELLS 2 struct ti_tscadc_dev { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 737685e9e852..68029b30c3dc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -309,21 +309,20 @@ struct mlx5_hca_cap { __be16 max_desc_sz_rq; u8 rsvd21[2]; __be16 max_desc_sz_sq_dc; - u8 rsvd22[4]; - __be16 max_qp_mcg; - u8 rsvd23; + __be32 max_qp_mcg; + u8 rsvd22[3]; u8 log_max_mcg; - u8 rsvd24; + u8 rsvd23; u8 log_max_pd; - u8 rsvd25; + u8 rsvd24; u8 log_max_xrcd; - u8 rsvd26[42]; + u8 rsvd25[42]; __be16 log_uar_page_sz; - u8 rsvd27[28]; + u8 rsvd26[28]; u8 log_msx_atomic_size_qp; - u8 rsvd28[2]; + u8 rsvd27[2]; u8 log_msx_atomic_size_dc; - u8 rsvd29[76]; + u8 rsvd28[76]; }; @@ -472,9 +471,8 @@ struct mlx5_eqe_cmd { struct mlx5_eqe_page_req { u8 rsvd0[2]; __be16 func_id; - u8 rsvd1[2]; - __be16 num_pages; - __be32 rsvd2[5]; + __be32 num_pages; + __be32 rsvd1[5]; }; union ev_data { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2aa258b0ced1..8888381fc150 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -358,7 +358,7 @@ struct mlx5_caps { u32 reserved_lkey; u8 local_ca_ack_delay; u8 log_max_mcg; - u16 max_qp_mcg; + u32 max_qp_mcg; int min_page_sz; }; @@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s16 npages); + s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); @@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); -typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); -int mlx5_register_health_report_handler(health_handler_t handler); -void mlx5_unregister_health_report_handler(void); const char *mlx5_command_str(int command); int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 75981d0b57dc..580a5320cc96 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/rbtree.h> +#include <linux/err.h> struct module; struct device; diff --git a/include/linux/sched.h b/include/linux/sched.h index d722490da030..e9995eb5985c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -314,6 +314,7 @@ struct nsproxy; struct user_namespace; #ifdef CONFIG_MMU +extern unsigned long mmap_legacy_base(void); extern void arch_pick_mmap_layout(struct mm_struct *mm); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, @@ -1532,6 +1533,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) * Test 
if a process is not yet dead (at most zombie state) * If pid_alive fails, then pointers within the task structure * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. */ static inline int pid_alive(struct task_struct *p) { @@ -1543,6 +1546,8 @@ static inline int pid_alive(struct task_struct *p) * @tsk: Task structure to be checked. * * Check if a task structure is the first user space task the kernel created. + * + * Return: 1 if the task structure is init. 0 otherwise. */ static inline int is_global_init(struct task_struct *tsk) { @@ -1894,6 +1899,8 @@ extern struct task_struct *idle_task(int cpu); /** * is_idle_task - is the specified task an idle task? * @p: the task in question. + * + * Return: 1 if @p is an idle task. 0 otherwise. */ static inline bool is_idle_task(const struct task_struct *p) { diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 7d537ced949a..75f34949d9ab 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -117,9 +117,17 @@ do { \ #endif /*arch_spin_is_contended*/ #endif -/* The lock does not imply full memory barrier. */ -#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK -static inline void smp_mb__after_lock(void) { smp_mb(); } +/* + * Despite its name it doesn't necessarily has to be a full barrier. + * It should only guarantee that a STORE before the critical section + * can not be reordered with a LOAD inside this section. + * spin_lock() is the one-way barrier, this LOAD can not escape out + * of the region. So the default implementation simply ensures that + * a STORE can not move into the critical section, smp_wmb() should + * serialize it with another STORE done by spin_lock(). + */ +#ifndef smp_mb__before_spinlock +#define smp_mb__before_spinlock() smp_wmb() #endif /** diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 6d870353674a..1821445708d6 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -121,6 +121,7 @@ struct rpc_task_setup { #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ #define RPC_TASK_SENT 0x0800 /* message was sent */ #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ +#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) diff --git a/include/linux/swapops.h b/include/linux/swapops.h index c5fd30d2a415..8d4fa82bfb91 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) swp_entry_t arch_entry; BUG_ON(pte_file(pte)); + if (pte_swp_soft_dirty(pte)) + pte = pte_swp_clear_soft_dirty(pte); arch_entry = __pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4147d700a293..84662ecc7b51 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void); asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, int __user *); #else +#ifdef CONFIG_CLONE_BACKWARDS3 +asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, + int __user *, int); +#else asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int __user *, int); #endif +#endif asmlinkage long sys_execve(const char __user *filename, const char __user *const __user *argv, diff --git 
a/include/linux/user_namespace.h b/include/linux/user_namespace.h index b6b215f13b45..14105c26a836 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -23,6 +23,7 @@ struct user_namespace { struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; + int level; kuid_t owner; kgid_t group; unsigned int proc_inum; diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 7343a27fe819..47ada23345a1 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -22,6 +22,7 @@ #define _V4L2_CTRLS_H #include <linux/list.h> +#include <linux/mutex.h> #include <linux/videodev2.h> /* forward references */ diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index f18b91966d3d..8a358a2c97e6 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) if (rc > 0) /* local bh are disabled so it is ok to use _BH */ NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_LOWLATENCYRXPACKETS, rc); + LINUX_MIB_BUSYPOLLRXPACKETS, rc); } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && !need_resched() && !busy_loop_timeout(end_time)); @@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk) return false; } -static inline bool sk_busy_poll(struct sock *sk, int nonblock) -{ - return false; -} - static inline void skb_mark_napi_id(struct sk_buff *skb, struct napi_struct *napi) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 5a76f2bef822..0ce316bb3c65 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, return INET_ECN_encapsulate(tos, inner); } -static inline void tunnel_ip_select_ident(struct sk_buff *skb, - const struct iphdr *old_iph, - struct dst_entry *dst) -{ - struct iphdr *iph = ip_hdr(skb); - - /* Use inner packet iph-id if possible. 
*/ - if (skb->protocol == htons(ETH_P_IP) && old_iph->id) - iph->id = old_iph->id; - else - __ip_select_ident(iph, dst, - (skb_shinfo(skb)->gso_segs ?: 1) - 1); -} - int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct net *net, struct rtable *rt, struct sk_buff *skb, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index ffc9d883b163..76368c9d4503 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -685,13 +685,19 @@ struct psched_ratecfg { u64 rate_bytes_ps; /* bytes per second */ u32 mult; u16 overhead; + u8 linklayer; u8 shift; }; static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, unsigned int len) { - return ((u64)(len + r->overhead) * r->mult) >> r->shift; + len += r->overhead; + + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) + return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; + + return ((u64)len * r->mult) >> r->shift; } void psched_ratecfg_precompute(struct psched_ratecfg *r, @@ -703,6 +709,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, memset(res, 0, sizeof(*res)); res->rate = r->rate_bytes_ps; res->overhead = r->overhead; + res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); } #endif diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index dbd71b0c7d8c..09d62b9228ff 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -73,9 +73,17 @@ struct tc_estimator { #define TC_H_ROOT (0xFFFFFFFFU) #define TC_H_INGRESS (0xFFFFFFF1U) +/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ +enum tc_link_layer { + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ + TC_LINKLAYER_ETHERNET, + TC_LINKLAYER_ATM, +}; +#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ + struct tc_ratespec { unsigned char cell_log; - unsigned char __reserved; + __u8 linklayer; /* lower 4 bits */ unsigned short overhead; short cell_align; unsigned short mpu; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 60601d28da75..1bdb4a39d1e1 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -257,7 +257,7 @@ enum LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ - LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ + LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */ __LINUX_MIB_MAX }; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 789ec4683db3..781845a013ab 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4335,8 +4335,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, } err = percpu_ref_init(&css->refcnt, css_release); - if (err) + if (err) { + ss->css_free(cgrp); goto err_free_all; + } init_cgroup_css(css, ss, cgrp); diff --git a/kernel/fork.c b/kernel/fork.c index 403d2bb8a968..e23bb19e2a3e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val) +#elif defined(CONFIG_CLONE_BACKWARDS3) +SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, + int, stack_size, + int __user *, parent_tidptr, + int __user *, child_tidptr, + int, tls_val) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, diff --git a/kernel/mutex.c 
b/kernel/mutex.c index ff05f4bd86eb..a52ee7bb830d 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) might_sleep(); ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, &ctx->dep_map, _RET_IP_, ctx); - if (!ret && ctx->acquired > 0) + if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); return ret; @@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, &ctx->dep_map, _RET_IP_, ctx); - if (!ret && ctx->acquired > 0) + if (!ret && ctx->acquired > 1) return ww_mutex_deadlock_injection(lock, ctx); return ret; diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 06fe28589e9c..a394297f8b2f 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req) } EXPORT_SYMBOL_GPL(pm_qos_request_active); +static void __pm_qos_update_request(struct pm_qos_request *req, + s32 new_value) +{ + trace_pm_qos_update_request(req->pm_qos_class, new_value); + + if (new_value != req->node.prio) + pm_qos_update_target( + pm_qos_array[req->pm_qos_class]->constraints, + &req->node, PM_QOS_UPDATE_REQ, new_value); +} + /** * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout * @work: work struct for the delayed work (timeout) @@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work) struct pm_qos_request, work); - pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); + __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); } /** @@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req, } cancel_delayed_work_sync(&req->work); - - trace_pm_qos_update_request(req->pm_qos_class, new_value); - if (new_value != req->node.prio) - pm_qos_update_target( - pm_qos_array[req->pm_qos_class]->constraints, - &req->node, PM_QOS_UPDATE_REQ, new_value); + __pm_qos_update_request(req, new_value); } EXPORT_SYMBOL_GPL(pm_qos_update_request); diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c index b51087fb9ace..276762f3a460 100644 --- a/kernel/printk/braille.c +++ b/kernel/printk/braille.c @@ -19,7 +19,8 @@ char *_braille_console_setup(char **str, char **brl_options) pr_err("need port name after brl=\n"); else *((*str)++) = 0; - } + } else + return NULL; return *str; } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 4041f5747e73..a146ee327f6a 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) /* Architecture-specific hardware disable .. */ ptrace_disable(child); clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - flush_ptrace_hw_breakpoint(child); write_lock_irq(&tasklist_lock); /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7c32cb7bfeb..05c39f030314 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p) /** * task_curr - is this task currently executing on a CPU? * @p: the task in question. + * + * Return: 1 if the task is currently executing. 0 otherwise. */ inline int task_curr(const struct task_struct *p) { @@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu) * the simpler "current->state = TASK_RUNNING" to mark yourself * runnable without the overhead of this. 
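In the kernel/power/qos.c hunk above, the body of pm_qos_update_request() is split out into __pm_qos_update_request() so that the timeout work handler can update the constraint without going through the public entry point, which also does cancel_delayed_work_sync() on the request's own delayed work. Roughly the situation being avoided (a sketch, not the exact upstream code):

    /* A delayed-work handler must not call a helper that does
     * cancel_delayed_work_sync() on the work item it is running from,
     * since _sync() waits for the running handler to finish. */
    static void req_work_fn(struct work_struct *work)
    {
            struct pm_qos_request *req = container_of(to_delayed_work(work),
                                                      struct pm_qos_request,
                                                      work);

            /* OK: update the target directly. */
            __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);

            /* Not OK from here: pm_qos_update_request(req, ...) would do
             * cancel_delayed_work_sync(&req->work) and wait on itself. */
    }
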
* - * Returns %true if @p was woken up, %false if it was already running + * Return: %true if @p was woken up, %false if it was already running. * or @state didn't match @p's state. */ static int @@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) unsigned long flags; int cpu, success = 0; - smp_wmb(); + /* + * If we are going to wake up a thread waiting for CONDITION we + * need to ensure that CONDITION=1 done by the caller can not be + * reordered with p->state check below. This pairs with mb() in + * set_current_state() the waiting thread does. + */ + smp_mb__before_spinlock(); raw_spin_lock_irqsave(&p->pi_lock, flags); if (!(p->state & state)) goto out; @@ -1577,8 +1585,9 @@ out: * @p: The process to be woken up. * * Attempt to wake up the nominated process and move it to the set of runnable - * processes. Returns 1 if the process was woken up, 0 if it was already - * running. + * processes. + * + * Return: 1 if the process was woken up, 0 if it was already running. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. @@ -2191,6 +2200,8 @@ void scheduler_tick(void) * This makes sure that uptime, CFS vruntime, load * balancing, etc... continue to move forward, even * with a very low granularity. + * + * Return: Maximum deferment in nanoseconds. */ u64 scheduler_tick_max_deferment(void) { @@ -2394,6 +2405,12 @@ need_resched: if (sched_feat(HRTICK)) hrtick_clear(rq); + /* + * Make sure that signal_pending_state()->signal_pending() below + * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) + * done by the caller to avoid the race with signal_wake_up(). + */ + smp_mb__before_spinlock(); raw_spin_lock_irq(&rq->lock); switch_count = &prev->nivcsw; @@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion); * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. * - * The return value is 0 if timed out, and positive (at least 1, or number of - * jiffies left till timeout) if completed. + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left + * till timeout) if completed. */ unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) @@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io); * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. The caller is accounted as waiting for IO. * - * The return value is 0 if timed out, and positive (at least 1, or number of - * jiffies left till timeout) if completed. + * Return: 0 if timed out, and positive (at least 1, or number of jiffies left + * till timeout) if completed. */ unsigned long __sched wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) @@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout); * This waits for completion of a specific task to be signaled. It is * interruptible. * - * The return value is -ERESTARTSYS if interrupted, 0 if completed. + * Return: -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_interruptible(struct completion *x) { @@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. It is interruptible. The timeout is in jiffies. 
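The bare smp_wmb() that used to sit at the top of try_to_wake_up() is replaced by smp_mb__before_spinlock(), introduced in the include/linux/spinlock.h hunk earlier, and the new comment spells out what it pairs with. A schematic of that pairing (CONDITION stands in for whatever flag the sleeper checks; kernel-style sketch, not meant to compile on its own):

    static int CONDITION;   /* stand-in for the sleeper's wakeup flag */

    static void sleeper(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);  /* implies a full barrier */
            if (!CONDITION)
                    schedule();
            __set_current_state(TASK_RUNNING);
    }

    static void waker(struct task_struct *p)
    {
            CONDITION = 1;                  /* STORE that must be visible...  */
            smp_mb__before_spinlock();      /* ...before the LOAD of p->state */
            raw_spin_lock(&p->pi_lock);     /* the lock keeps the LOAD inside */
            /* try_to_wake_up() reads p->state here and decides to queue p */
            raw_spin_unlock(&p->pi_lock);
    }
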
* - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, - * positive (at least 1, or number of jiffies left till timeout) if completed. + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_interruptible_timeout(struct completion *x, @@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); * This waits to be signaled for completion of a specific task. It can be * interrupted by a kill signal. * - * The return value is -ERESTARTSYS if interrupted, 0 if completed. + * Return: -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_killable(struct completion *x) { @@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable); * signaled or for a specified timeout to expire. It can be * interrupted by a kill signal. The timeout is in jiffies. * - * The return value is -ERESTARTSYS if interrupted, 0 if timed out, - * positive (at least 1, or number of jiffies left till timeout) if completed. + * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, + * or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_killable_timeout(struct completion *x, @@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout); * try_wait_for_completion - try to decrement a completion without blocking * @x: completion structure * - * Returns: 0 if a decrement cannot be done without blocking + * Return: 0 if a decrement cannot be done without blocking * 1 if a decrement succeeded. * * If a completion is being used as a counting completion, @@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion); * completion_done - Test to see if a completion has any waiters * @x: completion structure * - * Returns: 0 if there are waiters (wait_for_completion() in progress) + * Return: 0 if there are waiters (wait_for_completion() in progress) * 1 if there are no waiters. * */ @@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment) * task_prio - return the priority value of a given task. * @p: the task in question. * - * This is the priority value as seen by users in /proc. + * Return: The priority value as seen by users in /proc. * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ @@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p) /** * task_nice - return the nice value of a given task. * @p: the task in question. + * + * Return: The nice value [ -20 ... 0 ... 19 ]. */ int task_nice(const struct task_struct *p) { @@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice); /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. + * + * Return: 1 if the CPU is currently idle. 0 otherwise. */ int idle_cpu(int cpu) { @@ -3226,6 +3247,8 @@ int idle_cpu(int cpu) /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. + * + * Return: The idle task for the cpu @cpu. */ struct task_struct *idle_task(int cpu) { @@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu) /** * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. + * + * The task of @pid, if found. %NULL otherwise. */ static struct task_struct *find_process_by_pid(pid_t pid) { @@ -3432,6 +3457,8 @@ recheck: * @policy: new policy. * @param: structure containing the new RT priority. * + * Return: 0 on success. An error code otherwise. 
+ * * NOTE that the task may be already dead. */ int sched_setscheduler(struct task_struct *p, int policy, @@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); * current context has permission. For example, this is needed in * stop_machine(): we create temporary high priority worker threads, * but our caller might not have that capability. + * + * Return: 0 on success. An error code otherwise. */ int sched_setscheduler_nocheck(struct task_struct *p, int policy, const struct sched_param *param) @@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) * @pid: the pid in question. * @policy: new policy. * @param: structure containing the new RT priority. + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) @@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, * sys_sched_setparam - set/change the RT priority of a thread * @pid: the pid in question. * @param: structure containing the new RT priority. + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) { @@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) /** * sys_sched_getscheduler - get the policy (scheduling class) of a thread * @pid: the pid in question. + * + * Return: On success, the policy of the thread. Otherwise, a negative error + * code. */ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) { @@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) * sys_sched_getparam - get the RT priority of a thread * @pid: the pid in question. * @param: structure containing the RT priority. + * + * Return: On success, 0 and the RT priority is in @param. Otherwise, an error + * code. */ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) { @@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to the new cpu mask + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) @@ -3710,6 +3751,8 @@ out_unlock: * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to hold the current cpu mask + * + * Return: 0 on success. An error code otherwise. */ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) @@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, * * This function yields the current CPU to other tasks. If there are no * other threads running on this CPU then this function will return. + * + * Return: 0. */ SYSCALL_DEFINE0(sched_yield) { @@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield); * It's the caller's job to ensure that the target task struct * can't go away on us before we can do any checks. * - * Returns: + * Return: * true (>0) if we indeed boosted the target task. * false (0) if we failed to boost the target. * -ESRCH if there's no task to yield to. @@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout) * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. 
* - * this syscall returns the maximum rt_priority that can be used - * by a given scheduling class. + * Return: On success, this syscall returns the maximum + * rt_priority that can be used by a given scheduling class. + * On failure, a negative error code is returned. */ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) { @@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) * sys_sched_get_priority_min - return minimum RT priority. * @policy: scheduling class. * - * this syscall returns the minimum rt_priority that can be used - * by a given scheduling class. + * Return: On success, this syscall returns the minimum + * rt_priority that can be used by a given scheduling class. + * On failure, a negative error code is returned. */ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) { @@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) * * this syscall writes the default timeslice value of a given process * into the user-space timespec buffer. A value of '0' means infinity. + * + * Return: On success, 0 and the timeslice is in @interval. Otherwise, + * an error code. */ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, struct timespec __user *, interval) @@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void) * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! + * + * Return: The current task for @cpu. */ struct task_struct *curr_task(int cpu) { diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 1095e878a46f..8b836b376d91 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -62,7 +62,7 @@ static int convert_prio(int prio) * any discrepancies created by racing against the uncertainty of the current * priority configuration. * - * Returns: (int)bool - CPUs were found + * Return: (int)bool - CPUs were found */ int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) @@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * cpupri_init - initialize the cpupri structure * @cp: The cpupri context * - * Returns: -ENOMEM if memory fails. + * Return: -ENOMEM on memory allocation failure. */ int cpupri_init(struct cpupri *cp) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9565645e3202..68f1609ca149 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) */ update_entity_load_avg(curr, 1); update_cfs_rq_blocked_load(cfs_rq, 1); + update_cfs_shares(cfs_rq); #ifdef CONFIG_SCHED_HRTICK /* @@ -4280,6 +4281,8 @@ struct sg_lb_stats { * get_sd_load_idx - Obtain the load index for a given sched domain. * @sd: The sched_domain whose load_idx is to be obtained. * @idle: The Idle status of the CPU for whose sd load_icx is obtained. + * + * Return: The load index. */ static inline int get_sd_load_idx(struct sched_domain *sd, enum cpu_idle_type idle) @@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, * * Determine if @sg is a busier group than the previously selected * busiest group. + * + * Return: %true if @sg is a busier group than the previously selected + * busiest group. %false otherwise. */ static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, @@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, * assuming lower CPU number will be equivalent to lower a SMT thread * number. 
* - * Returns 1 when packing is required and a task should be moved to + * Return: 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in *imbalance. * * @env: The load balancing environment. @@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. * - * Returns: - the busiest group if imbalance exists. + * Return: - The busiest group if imbalance exists. * - If no imbalance and user has opted for power-savings balance, * return the least loaded group whose CPUs can be * put to idle by rebalancing its tasks onto our group. diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8ce9eefc5bb4..a6d098c6df3f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2169,12 +2169,57 @@ static cycle_t ftrace_update_time; static unsigned long ftrace_update_cnt; unsigned long ftrace_update_tot_cnt; -static int ops_traces_mod(struct ftrace_ops *ops) +static inline int ops_traces_mod(struct ftrace_ops *ops) { - struct ftrace_hash *hash; + /* + * Filter_hash being empty will default to trace module. + * But notrace hash requires a test of individual module functions. + */ + return ftrace_hash_empty(ops->filter_hash) && + ftrace_hash_empty(ops->notrace_hash); +} + +/* + * Check if the current ops references the record. + * + * If the ops traces all functions, then it was already accounted for. + * If the ops does not trace the current record function, skip it. + * If the ops ignores the function via notrace filter, skip it. + */ +static inline bool +ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) +{ + /* If ops isn't enabled, ignore it */ + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return 0; + + /* If ops traces all mods, we already accounted for it */ + if (ops_traces_mod(ops)) + return 0; + + /* The function must be in the filter */ + if (!ftrace_hash_empty(ops->filter_hash) && + !ftrace_lookup_ip(ops->filter_hash, rec->ip)) + return 0; + + /* If in notrace hash, we ignore it too */ + if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) + return 0; + + return 1; +} + +static int referenced_filters(struct dyn_ftrace *rec) +{ + struct ftrace_ops *ops; + int cnt = 0; - hash = ops->filter_hash; - return ftrace_hash_empty(hash); + for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { + if (ops_references_rec(ops, rec)) + cnt++; + } + + return cnt; } static int ftrace_update_code(struct module *mod) @@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod) struct dyn_ftrace *p; cycle_t start, stop; unsigned long ref = 0; + bool test = false; int i; /* @@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod) for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { - if (ops->flags & FTRACE_OPS_FL_ENABLED && - ops_traces_mod(ops)) - ref++; + if (ops->flags & FTRACE_OPS_FL_ENABLED) { + if (ops_traces_mod(ops)) + ref++; + else + test = true; + } } } @@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod) for (pg = ftrace_new_pgs; pg; pg = pg->next) { for (i = 0; i < pg->index; i++) { + int cnt = ref; + /* If something went wrong, bail without enabling anything */ if (unlikely(ftrace_disabled)) return -1; p = &pg->records[i]; - p->flags = ref; + if (test) + cnt += referenced_filters(p); + p->flags = cnt; /* * Do the initial record conversion from mcount jump @@ -2233,7 
+2286,7 @@ static int ftrace_update_code(struct module *mod) * conversion puts the module to the correct state, thus * passing the ftrace_make_call check. */ - if (ftrace_start_up && ref) { + if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(p, 1); if (failed) ftrace_bug(failed, p->ip); @@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return add_hash_entry(hash, ip); } +static void ftrace_ops_update_code(struct ftrace_ops *ops) +{ + if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) + ftrace_run_update_code(FTRACE_UPDATE_CALLS); +} + static int ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, unsigned long ip, int remove, int reset, int enable) @@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, mutex_lock(&ftrace_lock); ret = ftrace_hash_move(ops, enable, orig_hash, hash); - if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED - && ftrace_enabled) - ftrace_run_update_code(FTRACE_UPDATE_CALLS); + if (!ret) + ftrace_ops_update_code(ops); mutex_unlock(&ftrace_lock); @@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) mutex_lock(&ftrace_lock); ret = ftrace_hash_move(iter->ops, filter_hash, orig_hash, iter->hash); - if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED) - && ftrace_enabled) - ftrace_run_update_code(FTRACE_UPDATE_CALLS); + if (!ret) + ftrace_ops_update_code(iter->ops); mutex_unlock(&ftrace_lock); } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 882ec1dd1515..496f94d57698 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer, } EXPORT_SYMBOL_GPL(filter_current_check_discard); -cycle_t ftrace_now(int cpu) +cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ - if (!global_trace.trace_buffer.buffer) + if (!buf->buffer) return trace_clock_local(); - ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu); - ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts); + ts = ring_buffer_time_stamp(buf->buffer, cpu); + ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } +cycle_t ftrace_now(int cpu) +{ + return buffer_ftrace_now(&global_trace.trace_buffer, cpu); +} + /** * tracing_is_enabled - Show if global_trace has been disabled * @@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) /* Make sure all commits have finished */ synchronize_sched(); - buf->time_start = ftrace_now(buf->cpu); + buf->time_start = buffer_ftrace_now(buf, buf->cpu); for_each_online_cpu(cpu) ring_buffer_reset_cpu(buffer, cpu); @@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) ring_buffer_record_enable(buffer); } -void tracing_reset_current(int cpu) -{ - tracing_reset(&global_trace.trace_buffer, cpu); -} - /* Must have trace_types_lock held */ void tracing_reset_all_online_cpus(void) { @@ -4151,6 +4151,7 @@ waitagain: memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); + cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); @@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp) /* disable tracing ? 
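The pipe-read path shown above zeroes every trace_iterator field from seq onwards with one memset(), which is why the include/linux/ftrace_event.h hunk earlier moves started and snapshot above seq and why cpumask_clear(iter->started) now has to be done explicitly. The offsetof() idiom itself, reduced to a stand-alone example with a made-up struct:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct iter_like {
            int keep_a;     /* fields before the marker survive the memset */
            long keep_b;
            char seq[8];    /* marker: everything from here on is wiped    */
            int pos;
            long idx;
    };

    int main(void)
    {
            struct iter_like it = { .keep_a = 1, .keep_b = 2,
                                    .seq = "abcdefg", .pos = 3, .idx = 4 };

            /* Zero the tail of the struct starting at 'seq', as
             * tracing_read_pipe() does for struct trace_iterator. */
            memset(&it.seq, 0, sizeof(it) - offsetof(struct iter_like, seq));

            printf("keep_a=%d keep_b=%ld pos=%d idx=%ld\n",
                   it.keep_a, it.keep_b, it.pos, it.idx);
            return 0;
    }
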
*/ if (trace_flags & TRACE_ITER_STOP_ON_FREE) - tracing_off(); + tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); @@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. */ - tracing_reset_online_cpus(&global_trace.trace_buffer); + tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); - tracing_reset_online_cpus(&global_trace.max_buffer); + tracing_reset_online_cpus(&tr->max_buffer); #endif mutex_unlock(&trace_types_lock); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 898f868833f2..29a7ebcfb426 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir) mutex_unlock(&event_mutex); } -/* - * Open and update trace_array ref count. - * Must have the current trace_array passed to it. - */ -static int tracing_open_generic_file(struct inode *inode, struct file *filp) +static void remove_subsystem(struct ftrace_subsystem_dir *dir) { - struct ftrace_event_file *file = inode->i_private; - struct trace_array *tr = file->tr; - int ret; + if (!dir) + return; - if (trace_array_get(tr) < 0) - return -ENODEV; + if (!--dir->nr_events) { + debugfs_remove_recursive(dir->entry); + list_del(&dir->list); + __put_system_dir(dir); + } +} - ret = tracing_open_generic(inode, filp); - if (ret < 0) - trace_array_put(tr); - return ret; +static void *event_file_data(struct file *filp) +{ + return ACCESS_ONCE(file_inode(filp)->i_private); } -static int tracing_release_generic_file(struct inode *inode, struct file *filp) +static void remove_event_file_dir(struct ftrace_event_file *file) { - struct ftrace_event_file *file = inode->i_private; - struct trace_array *tr = file->tr; + struct dentry *dir = file->dir; + struct dentry *child; - trace_array_put(tr); + if (dir) { + spin_lock(&dir->d_lock); /* probably unneeded */ + list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { + if (child->d_inode) /* probably unneeded */ + child->d_inode->i_private = NULL; + } + spin_unlock(&dir->d_lock); - return 0; + debugfs_remove_recursive(dir); + } + + list_del(&file->list); + remove_subsystem(file->system); + kmem_cache_free(file_cachep, file); } /* @@ -679,15 +688,25 @@ static ssize_t event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_file *file = filp->private_data; + struct ftrace_event_file *file; + unsigned long flags; char buf[4] = "0"; - if (file->flags & FTRACE_EVENT_FL_ENABLED && - !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) + mutex_lock(&event_mutex); + file = event_file_data(filp); + if (likely(file)) + flags = file->flags; + mutex_unlock(&event_mutex); + + if (!file) + return -ENODEV; + + if (flags & FTRACE_EVENT_FL_ENABLED && + !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) strcpy(buf, "1"); - if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED || - file->flags & FTRACE_EVENT_FL_SOFT_MODE) + if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || + flags & FTRACE_EVENT_FL_SOFT_MODE) strcat(buf, "*"); strcat(buf, "\n"); @@ -699,13 +718,10 @@ static ssize_t event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct 
ftrace_event_file *file = filp->private_data; + struct ftrace_event_file *file; unsigned long val; int ret; - if (!file) - return -EINVAL; - ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; @@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, switch (val) { case 0: case 1: + ret = -ENODEV; mutex_lock(&event_mutex); - ret = ftrace_event_enable_disable(file, val); + file = event_file_data(filp); + if (likely(file)) + ret = ftrace_event_enable_disable(file, val); mutex_unlock(&event_mutex); break; @@ -825,7 +844,7 @@ enum { static void *f_next(struct seq_file *m, void *v, loff_t *pos) { - struct ftrace_event_call *call = m->private; + struct ftrace_event_call *call = event_file_data(m->private); struct list_head *common_head = &ftrace_common_fields; struct list_head *head = trace_get_fields(call); struct list_head *node = v; @@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos) static int f_show(struct seq_file *m, void *v) { - struct ftrace_event_call *call = m->private; + struct ftrace_event_call *call = event_file_data(m->private); struct ftrace_event_field *field; const char *array_descriptor; @@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos) void *p = (void *)FORMAT_HEADER; loff_t l = 0; + /* ->stop() is called even if ->start() fails */ + mutex_lock(&event_mutex); + if (!event_file_data(m->private)) + return ERR_PTR(-ENODEV); + while (l < *pos && p) p = f_next(m, p, &l); @@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos) static void f_stop(struct seq_file *m, void *p) { + mutex_unlock(&event_mutex); } static const struct seq_operations trace_format_seq_ops = { @@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = { static int trace_format_open(struct inode *inode, struct file *file) { - struct ftrace_event_call *call = inode->i_private; struct seq_file *m; int ret; @@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file) return ret; m = file->private_data; - m->private = call; + m->private = file; return 0; } @@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file) static ssize_t event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_call *call = filp->private_data; + int id = (long)event_file_data(filp); char buf[32]; int len; if (*ppos) return 0; - len = sprintf(buf, "%d\n", call->event.type); + if (unlikely(!id)) + return -ENODEV; + + len = sprintf(buf, "%d\n", id); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } @@ -961,21 +989,28 @@ static ssize_t event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_call *call = filp->private_data; + struct ftrace_event_call *call; struct trace_seq *s; - int r; + int r = -ENODEV; if (*ppos) return 0; s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) return -ENOMEM; trace_seq_init(s); - print_event_filter(call, s); - r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); + mutex_lock(&event_mutex); + call = event_file_data(filp); + if (call) + print_event_filter(call, s); + mutex_unlock(&event_mutex); + + if (call) + r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); kfree(s); @@ -986,9 +1021,9 @@ static ssize_t event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ftrace_event_call *call = filp->private_data; + struct ftrace_event_call *call; 
char *buf; - int err; + int err = -ENODEV; if (cnt >= PAGE_SIZE) return -EINVAL; @@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, } buf[cnt] = '\0'; - err = apply_event_filter(call, buf); + mutex_lock(&event_mutex); + call = event_file_data(filp); + if (call) + err = apply_event_filter(call, buf); + mutex_unlock(&event_mutex); + free_page((unsigned long) buf); if (err < 0) return err; @@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = { }; static const struct file_operations ftrace_enable_fops = { - .open = tracing_open_generic_file, + .open = tracing_open_generic, .read = event_enable_read, .write = event_enable_write, - .release = tracing_release_generic_file, .llseek = default_llseek, }; @@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = { }; static const struct file_operations ftrace_event_id_fops = { - .open = tracing_open_generic, .read = event_id_read, .llseek = default_llseek, }; @@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent, #ifdef CONFIG_PERF_EVENTS if (call->event.type && call->class->reg) - trace_create_file("id", 0444, file->dir, call, - id); + trace_create_file("id", 0444, file->dir, + (void *)(long)call->event.type, id); #endif /* @@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent, return 0; } -static void remove_subsystem(struct ftrace_subsystem_dir *dir) -{ - if (!dir) - return; - - if (!--dir->nr_events) { - debugfs_remove_recursive(dir->entry); - list_del(&dir->list); - __put_system_dir(dir); - } -} - static void remove_event_from_tracers(struct ftrace_event_call *call) { struct ftrace_event_file *file; struct trace_array *tr; do_for_each_event_file_safe(tr, file) { - if (file->event_call != call) continue; - list_del(&file->list); - debugfs_remove_recursive(file->dir); - remove_subsystem(file->system); - kmem_cache_free(file_cachep, file); - + remove_event_file_dir(file); /* * The do_for_each_event_file_safe() is * a double loop. After finding the call for this @@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) destroy_preds(call); } +static int probe_remove_event_call(struct ftrace_event_call *call) +{ + struct trace_array *tr; + struct ftrace_event_file *file; + +#ifdef CONFIG_PERF_EVENTS + if (call->perf_refcount) + return -EBUSY; +#endif + do_for_each_event_file(tr, file) { + if (file->event_call != call) + continue; + /* + * We can't rely on ftrace_event_enable_disable(enable => 0) + * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress + * TRACE_REG_UNREGISTER. + */ + if (file->flags & FTRACE_EVENT_FL_ENABLED) + return -EBUSY; + /* + * The do_for_each_event_file_safe() is + * a double loop. After finding the call for this + * trace_array, we use break to jump to the next + * trace_array. 
+ */ + break; + } while_for_each_event_file(); + + __trace_remove_event_call(call); + + return 0; +} + /* Remove an event_call */ -void trace_remove_event_call(struct ftrace_event_call *call) +int trace_remove_event_call(struct ftrace_event_call *call) { + int ret; + mutex_lock(&trace_types_lock); mutex_lock(&event_mutex); down_write(&trace_event_sem); - __trace_remove_event_call(call); + ret = probe_remove_event_call(call); up_write(&trace_event_sem); mutex_unlock(&event_mutex); mutex_unlock(&trace_types_lock); + + return ret; } #define for_each_event(event, start, end) \ @@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr) { struct ftrace_event_file *file, *next; - list_for_each_entry_safe(file, next, &tr->events, list) { - list_del(&file->list); - debugfs_remove_recursive(file->dir); - remove_subsystem(file->system); - kmem_cache_free(file_cachep, file); - } + list_for_each_entry_safe(file, next, &tr->events, list) + remove_event_file_dir(file); } static void diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0c7b75a8acc8..97daa8cf958d 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps, free_page((unsigned long) buf); } +/* caller must hold event_mutex */ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) { - struct event_filter *filter; + struct event_filter *filter = call->filter; - mutex_lock(&event_mutex); - filter = call->filter; if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else trace_seq_puts(s, "none\n"); - mutex_unlock(&event_mutex); } void print_subsystem_event_filter(struct event_subsystem *system, @@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system, return err; } +/* caller must hold event_mutex */ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { struct event_filter *filter; - int err = 0; - - mutex_lock(&event_mutex); + int err; if (!strcmp(strstrip(filter_string), "0")) { filter_disable(call); filter = call->filter; if (!filter) - goto out_unlock; + return 0; RCU_INIT_POINTER(call->filter, NULL); /* Make sure the filter is not being used */ synchronize_sched(); __free_filter(filter); - goto out_unlock; + return 0; } err = create_filter(call, filter_string, true, &filter); @@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) __free_filter(tmp); } } -out_unlock: - mutex_unlock(&event_mutex); return err; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3811487e7a7a..243f6834d026 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) } static int register_probe_event(struct trace_probe *tp); -static void unregister_probe_event(struct trace_probe *tp); +static int unregister_probe_event(struct trace_probe *tp); static DEFINE_MUTEX(probe_lock); static LIST_HEAD(probe_list); @@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp) if (trace_probe_is_enabled(tp)) return -EBUSY; + /* Will fail if probe is being used by ftrace or perf */ + if (unregister_probe_event(tp)) + return -EBUSY; + __unregister_trace_probe(tp); list_del(&tp->list); - unregister_probe_event(tp); return 0; } @@ -632,7 +635,9 @@ static int release_all_trace_probes(void) /* TODO: Use batch 
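trace_remove_event_call() now returns an error instead of silently removing an event that perf or an enabled ftrace file still uses, and the kprobe/uprobe callers abort the teardown when they see -EBUSY. A small sketch of the same refuse-while-busy shape, using hypothetical names rather than the tracing API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct probe {
    char *name;
    int enabled;        /* still hooked up to a consumer? */
};

/* Refuse to tear down a probe that is still in use. */
static int unregister_probe(struct probe *p)
{
    if (p->enabled)
        return -EBUSY;
    free(p->name);
    free(p);
    return 0;
}

int main(void)
{
    struct probe *p = malloc(sizeof(*p));

    p->name = strdup("demo");
    p->enabled = 1;

    if (unregister_probe(p) == -EBUSY)
        printf("probe busy, teardown aborted\n");

    p->enabled = 0;            /* consumer detached */
    printf("second attempt: %d\n", unregister_probe(p));
    return 0;
}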
unregistration */ while (!list_empty(&probe_list)) { tp = list_entry(probe_list.next, struct trace_probe, list); - unregister_trace_probe(tp); + ret = unregister_trace_probe(tp); + if (ret) + goto end; free_trace_probe(tp); } @@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp) return ret; } -static void unregister_probe_event(struct trace_probe *tp) +static int unregister_probe_event(struct trace_probe *tp) { + int ret; + /* tp->event is unregistered in trace_remove_event_call() */ - trace_remove_event_call(&tp->call); - kfree(tp->call.print_fmt); + ret = trace_remove_event_call(&tp->call); + if (!ret) + kfree(tp->call.print_fmt); + return ret; } /* Make a debugfs interface for controlling probe points */ diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index a23d2d71188e..272261b5f94f 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -70,7 +70,7 @@ struct trace_uprobe { (sizeof(struct probe_arg) * (n))) static int register_uprobe_event(struct trace_uprobe *tu); -static void unregister_uprobe_event(struct trace_uprobe *tu); +static int unregister_uprobe_event(struct trace_uprobe *tu); static DEFINE_MUTEX(uprobe_lock); static LIST_HEAD(uprobe_list); @@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou } /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ -static void unregister_trace_uprobe(struct trace_uprobe *tu) +static int unregister_trace_uprobe(struct trace_uprobe *tu) { + int ret; + + ret = unregister_uprobe_event(tu); + if (ret) + return ret; + list_del(&tu->list); - unregister_uprobe_event(tu); free_trace_uprobe(tu); + return 0; } /* Register a trace_uprobe and probe_event */ @@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu) /* register as an event */ old_tp = find_probe_event(tu->call.name, tu->call.class->system); - if (old_tp) + if (old_tp) { /* delete old event */ - unregister_trace_uprobe(old_tp); + ret = unregister_trace_uprobe(old_tp); + if (ret) + goto end; + } ret = register_uprobe_event(tu); if (ret) { @@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv) group = UPROBE_EVENT_SYSTEM; if (is_delete) { + int ret; + if (!event) { pr_info("Delete command needs an event name.\n"); return -EINVAL; @@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv) return -ENOENT; } /* delete an event */ - unregister_trace_uprobe(tu); + ret = unregister_trace_uprobe(tu); mutex_unlock(&uprobe_lock); - return 0; + return ret; } if (argc < 2) { @@ -408,16 +419,20 @@ fail_address_parse: return ret; } -static void cleanup_all_probes(void) +static int cleanup_all_probes(void) { struct trace_uprobe *tu; + int ret = 0; mutex_lock(&uprobe_lock); while (!list_empty(&uprobe_list)) { tu = list_entry(uprobe_list.next, struct trace_uprobe, list); - unregister_trace_uprobe(tu); + ret = unregister_trace_uprobe(tu); + if (ret) + break; } mutex_unlock(&uprobe_lock); + return ret; } /* Probes listing interfaces */ @@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = { static int probes_open(struct inode *inode, struct file *file) { - if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) - cleanup_all_probes(); + int ret; + + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { + ret = cleanup_all_probes(); + if (ret) + return ret; + } return seq_open(file, &probes_seq_op); } @@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe 
*tu) return ret; } -static void unregister_uprobe_event(struct trace_uprobe *tu) +static int unregister_uprobe_event(struct trace_uprobe *tu) { + int ret; + /* tu->event is unregistered in trace_remove_event_call() */ - trace_remove_event_call(&tu->call); + ret = trace_remove_event_call(&tu->call); + if (ret) + return ret; kfree(tu->call.print_fmt); tu->call.print_fmt = NULL; + return 0; } /* Make a trace interface for controling probe points */ diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index d8c30db06c5b..9064b919a406 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -62,6 +62,9 @@ int create_user_ns(struct cred *new) kgid_t group = new->egid; int ret; + if (parent_ns->level > 32) + return -EUSERS; + /* * Verify that we can not violate the policy of which files * may be accessed that is specified by the root directory, @@ -92,6 +95,7 @@ int create_user_ns(struct cred *new) atomic_set(&ns->count, 1); /* Leave the new->user_ns reference with the new user namespace. */ ns->parent = parent_ns; + ns->level = parent_ns->level + 1; ns->owner = owner; ns->group = group; @@ -105,16 +109,21 @@ int create_user_ns(struct cred *new) int unshare_userns(unsigned long unshare_flags, struct cred **new_cred) { struct cred *cred; + int err = -ENOMEM; if (!(unshare_flags & CLONE_NEWUSER)) return 0; cred = prepare_creds(); - if (!cred) - return -ENOMEM; + if (cred) { + err = create_user_ns(cred); + if (err) + put_cred(cred); + else + *new_cred = cred; + } - *new_cred = cred; - return create_user_ns(cred); + return err; } void free_user_ns(struct user_namespace *ns) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0b72e816b8d0..7f5d4be22034 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2817,6 +2817,19 @@ already_gone: return false; } +static bool __flush_work(struct work_struct *work) +{ + struct wq_barrier barr; + + if (start_flush_work(work, &barr)) { + wait_for_completion(&barr.done); + destroy_work_on_stack(&barr.work); + return true; + } else { + return false; + } +} + /** * flush_work - wait for a work to finish executing the last queueing instance * @work: the work to flush @@ -2830,18 +2843,10 @@ already_gone: */ bool flush_work(struct work_struct *work) { - struct wq_barrier barr; - lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - if (start_flush_work(work, &barr)) { - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); - return true; - } else { - return false; - } + return __flush_work(work); } EXPORT_SYMBOL_GPL(flush_work); @@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to, { to->nice = from->nice; cpumask_copy(to->cpumask, from->cpumask); + /* + * Unlike hash and equality test, this function doesn't ignore + * ->no_numa as it is used for both pool and wq attrs. Instead, + * get_unbound_pool() explicitly clears ->no_numa after copying. + */ + to->no_numa = from->no_numa; } /* hash value of the content of @attr */ @@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ copy_workqueue_attrs(pool->attrs, attrs); + /* + * no_numa isn't a worker_pool attribute, always clear it. See + * 'struct workqueue_attrs' comments for detail. 
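The user_namespace.c hunk adds a nesting level to each namespace and refuses to create one whose parent is already more than 32 levels deep, returning -EUSERS. The bookkeeping is just "child level = parent level + 1" checked against a cap; a toy version of that arithmetic (illustrative struct, not the kernel one):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NS_LEVEL 32

struct user_ns {
    int level;
    struct user_ns *parent;
};

static int create_user_ns(struct user_ns *parent, struct user_ns **out)
{
    struct user_ns *ns;

    if (parent->level > MAX_NS_LEVEL)   /* same shape as the patch: refuse too-deep nesting */
        return -EUSERS;

    ns = calloc(1, sizeof(*ns));
    if (!ns)
        return -ENOMEM;
    ns->parent = parent;
    ns->level = parent->level + 1;
    *out = ns;
    return 0;
}

int main(void)
{
    struct user_ns root = { .level = 0, .parent = NULL };
    struct user_ns *ns = &root;
    int err = 0, depth = 0;

    while (!err) {                       /* leaks the intermediate nodes; fine for a demo */
        err = create_user_ns(ns, &ns);
        if (!err)
            depth++;
    }
    printf("stopped at depth %d with error %d\n", depth, err);
    return 0;
}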
+ */ + pool->attrs->no_numa = false; + /* if cpumask is contained inside a NUMA node, we belong to that node */ if (wq_numa_enabled) { for_each_node(node) { @@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); schedule_work_on(cpu, &wfc.work); - flush_work(&wfc.work); + + /* + * The work item is on-stack and can't lead to deadlock through + * flushing. Use __flush_work() to avoid spurious lockdep warnings + * when work_on_cpu()s are nested. + */ + __flush_work(&wfc.work); + return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); diff --git a/mm/fremap.c b/mm/fremap.c index 87da3590c61e..5bff08147768 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot) { int err = -ENOMEM; - pte_t *pte; + pte_t *pte, ptfile; spinlock_t *ptl; pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; - if (!pte_none(*pte)) + ptfile = pgoff_to_pte(pgoff); + + if (!pte_none(*pte)) { + if (pte_present(*pte) && pte_soft_dirty(*pte)) + pte_file_mksoft_dirty(ptfile); zap_pte(mm, vma, addr, pte); + } - set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); + set_pte_at(mm, addr, pte, ptfile); /* * We don't need to run update_mmu_cache() here because the "file pte" * being installed by install_file_pte() is not a real pte - it's a diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 83aff0a4d093..b60f33080a28 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, mm = vma->vm_mm; - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, start, end); __unmap_hugepage_range(&tlb, vma, start, end, ref_page); tlb_finish_mmu(&tlb, start, end); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c290a1cf3862..c5792a5d87ce 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, if (!s->memcg_params) return -ENOMEM; - INIT_WORK(&s->memcg_params->destroy, - kmem_cache_destroy_work_func); if (memcg) { s->memcg_params->memcg = memcg; s->memcg_params->root_cache = root_cache; + INIT_WORK(&s->memcg_params->destroy, + kmem_cache_destroy_work_func); } else s->memcg_params->is_root_cache = true; diff --git a/mm/memory.c b/mm/memory.c index 1ce2e2a734fc..af84bc0ec17c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb) * tear-down from @mm. The @fullmm argument is used when @mm is without * users and we're going to destroy the full address space (exit/execve). */ -void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) +void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; - tlb->fullmm = fullmm; + /* Is it from 0 to ~0? 
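The tlb_gather_mmu() rework in this region passes the actual start/end range instead of a fullmm flag and derives "tear down the whole address space" from the range itself: callers such as exit_mmap() pass 0 and -1, and the check !(start | (end + 1)) is true only for [0, ~0UL] because the unsigned end + 1 wraps to zero exactly then. A quick standalone check of that identity:

#include <stdio.h>

/* True only for the full range [0, ~0UL]: end + 1 overflows to 0 exactly when end == ~0UL. */
static int is_full_range(unsigned long start, unsigned long end)
{
    return !(start | (end + 1));
}

int main(void)
{
    printf("[0, ~0UL]      -> %d\n", is_full_range(0, ~0UL));          /* 1 */
    printf("[0, 0x7fff]    -> %d\n", is_full_range(0, 0x7fff));        /* 0 */
    printf("[0x1000, ~0UL] -> %d\n", is_full_range(0x1000, ~0UL));     /* 0 */
    return 0;
}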
*/ + tlb->fullmm = !(start | (end+1)); tlb->need_flush_all = 0; - tlb->start = -1UL; - tlb->end = 0; + tlb->start = start; + tlb->end = end; tlb->need_flush = 0; tlb->local.next = NULL; tlb->local.nr = 0; @@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e { struct mmu_gather_batch *batch, *next; - tlb->start = start; - tlb->end = end; tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ @@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, spinlock_t *ptl; pte_t *start_pte; pte_t *pte; - unsigned long range_start = addr; again: init_rss_vec(rss); @@ -1141,9 +1139,12 @@ again: continue; if (unlikely(details) && details->nonlinear_vma && linear_page_index(details->nonlinear_vma, - addr) != page->index) - set_pte_at(mm, addr, pte, - pgoff_to_pte(page->index)); + addr) != page->index) { + pte_t ptfile = pgoff_to_pte(page->index); + if (pte_soft_dirty(ptent)) + pte_file_mksoft_dirty(ptfile); + set_pte_at(mm, addr, pte, ptfile); + } if (PageAnon(page)) rss[MM_ANONPAGES]--; else { @@ -1202,17 +1203,25 @@ again: * and page-free while holding it. */ if (force_flush) { + unsigned long old_end; + force_flush = 0; -#ifdef HAVE_GENERIC_MMU_GATHER - tlb->start = range_start; + /* + * Flush the TLB just for the previous segment, + * then update the range to be the remaining + * TLB range. + */ + old_end = tlb->end; tlb->end = addr; -#endif + tlb_flush_mmu(tlb); - if (addr != end) { - range_start = addr; + + tlb->start = addr; + tlb->end = old_end; + + if (addr != end) goto again; - } } return addr; @@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end = start + size; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(mm, start, end); for ( ; vma && vma->vm_start < end; vma = vma->vm_next) @@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr unsigned long end = address + size; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, address, end); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(mm, address, end); unmap_single_vma(&tlb, vma, address, end, details); @@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, exclusive = 1; } flush_icache_page(vma, page); + if (pte_swp_soft_dirty(orig_pte)) + pte = pte_mksoft_dirty(pte); set_pte_at(mm, address, page_table, pte); if (page == swapcache) do_page_add_anon_rmap(page, vma, address, exclusive); @@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) entry = maybe_mkwrite(pte_mkdirty(entry), vma); + else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte)) + pte_mksoft_dirty(entry); if (anon) { inc_mm_counter_fast(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, address); diff --git a/mm/mmap.c b/mm/mmap.c index 1edbaa3136c3..f9c97d10b873 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm, struct mmu_gather tlb; lru_add_drain(); - tlb_gather_mmu(&tlb, mm, 0); + tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? 
prev->vm_end : FIRST_USER_ADDRESS, @@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm) lru_add_drain(); flush_cache_mm(mm); - tlb_gather_mmu(&tlb, mm, 1); + tlb_gather_mmu(&tlb, mm, 0, -1); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); diff --git a/mm/rmap.c b/mm/rmap.c index cd356df4f71a..b2e29acd7e3d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_entry_to_pte(make_hwpoison_entry(page))); } else if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(page) }; + pte_t swp_pte; if (PageSwapCache(page)) { /* @@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); entry = make_migration_entry(page, pte_write(pteval)); } - set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + set_pte_at(mm, address, pte, swp_pte); BUG_ON(pte_file(*pte)); } else if (IS_ENABLED(CONFIG_MIGRATION) && (TTU_ACTION(flags) == TTU_MIGRATION)) { @@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, pteval = ptep_clear_flush(vma, address, pte); /* If nonlinear, store the file page offset in the pte. */ - if (page->index != linear_page_index(vma, address)) - set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); + if (page->index != linear_page_index(vma, address)) { + pte_t ptfile = pgoff_to_pte(page->index); + if (pte_soft_dirty(pteval)) + pte_file_mksoft_dirty(ptfile); + set_pte_at(mm, address, pte, ptfile); + } /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) diff --git a/mm/shmem.c b/mm/shmem.c index a87990cf9f94..8335dbd3fc35 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) } } - offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); + if (offset >= 0) + offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); mutex_unlock(&inode->i_mutex); return offset; } diff --git a/mm/slub.c b/mm/slub.c index 2b02d666bf63..e3ba1f2cf60c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) int pages; int pobjects; - if (!s->cpu_partial) - return; - do { pages = 0; pobjects = 0; diff --git a/mm/swapfile.c b/mm/swapfile.c index 36af6eeaa67e..6cf2e60983b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free) } #endif /* CONFIG_HIBERNATION */ +static inline int maybe_same_pte(pte_t pte, pte_t swp_pte) +{ +#ifdef CONFIG_MEM_SOFT_DIRTY + /* + * When pte keeps soft dirty bit the pte generated + * from swap entry does not has it, still it's same + * pte from logical point of view. 
+ */ + pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte); + return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty); +#else + return pte_same(pte, swp_pte); +#endif +} + /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to @@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, } pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { + if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) { mem_cgroup_cancel_charge_swapin(memcg); ret = 0; goto out; @@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, * swapoff spends a _lot_ of time in this loop! * Test inline before going to call unuse_pte. */ - if (unlikely(pte_same(*pte, swp_pte))) { + if (unlikely(maybe_same_pte(*pte, swp_pte))) { pte_unmap(pte); ret = unuse_pte(vma, pmd, addr, entry, page); if (ret) diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4a78c4de9f20..6ee48aac776f 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep); struct net_device *vlan_dev_real_dev(const struct net_device *dev) { - return vlan_dev_priv(dev)->real_dev; + struct net_device *ret = vlan_dev_priv(dev)->real_dev; + + while (is_vlan_dev(ret)) + ret = vlan_dev_priv(ret)->real_dev; + + return ret; } EXPORT_SYMBOL(vlan_dev_real_dev); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e14531f1ce1c..264de88db320 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1529,6 +1529,8 @@ out: * in these cases, the skb is further handled by this function and * returns 1, otherwise it returns 0 and the caller shall further * process the skb. + * + * This call might reallocate skb data. 
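maybe_same_pte() above accepts a match whether or not the installed pte carries the soft-dirty bit, because the pte rebuilt from the swap entry never has it even though the two are the same entry logically. The same effect can be illustrated by comparing two encodings with the optional bit masked in on both sides; the bit position below is made up purely for the example, not the real pte layout:

#include <stdio.h>
#include <stdint.h>

#define SOFT_DIRTY_BIT 0x4u   /* illustrative flag bit, not the real pte encoding */

/* Equal if the two encodings differ at most in the soft-dirty bit. */
static int maybe_same(uint64_t a, uint64_t b)
{
    return (a | SOFT_DIRTY_BIT) == (b | SOFT_DIRTY_BIT);
}

int main(void)
{
    uint64_t swp = 0x1230;                      /* value rebuilt from the swap entry */
    uint64_t in_table = swp | SOFT_DIRTY_BIT;   /* what was actually installed */

    printf("%d %d %d\n",
           maybe_same(in_table, swp),           /* 1: same entry, bit ignored */
           maybe_same(swp, swp),                /* 1 */
           maybe_same(swp, 0x9990));            /* 0: genuinely different */
    return 0;
}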
*/ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index f105219f4a4b..7614af31daff 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -508,6 +508,7 @@ out: return 0; } +/* this call might reallocate skb data */ static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) { int ret = false; @@ -568,6 +569,7 @@ out: return ret; } +/* this call might reallocate skb data */ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) { struct ethhdr *ethhdr; @@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) return false; + + /* skb->data might have been reallocated by pskb_may_pull() */ + ethhdr = (struct ethhdr *)skb->data; + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) + ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); + udphdr = (struct udphdr *)(skb->data + *header_len); *header_len += sizeof(*udphdr); @@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) return true; } +/* this call might reallocate skb data */ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr) + struct sk_buff *skb) { struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *curr_gw = NULL; + struct ethhdr *ethhdr; bool ret, out_of_range = false; unsigned int header_len = 0; uint8_t curr_tq_avg; @@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, if (!ret) goto out; + ethhdr = (struct ethhdr *)skb->data; orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest); if (!orig_dst_node) diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 039902dca4a6..1037d75da51f 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, void batadv_gw_node_purge(struct batadv_priv *bat_priv); int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); -bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr); +bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 700d0b49742d..0f04e1c302b4 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb, if (batadv_bla_tx(bat_priv, skb, vid)) goto dropped; + /* skb->data might have been reallocated by batadv_bla_tx() */ + ethhdr = (struct ethhdr *)skb->data; + /* Register the client MAC in the transtable */ if (!is_multicast_ether_addr(ethhdr->h_source)) batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); @@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb, default: break; } + + /* reminder: ethhdr might have become unusable from here on + * (batadv_gw_is_dhcp_target() might have reallocated skb data) + */ } /* ethernet packet should be broadcasted */ @@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb, /* unicast packet */ 
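Several batman-adv hunks in this area re-read ethhdr from skb->data after calls such as pskb_may_pull() or batadv_bla_tx(), because those helpers may reallocate the packet data and leave the old pointer dangling. The general rule carries over directly to plain C: any pointer into a buffer must be recomputed after a call that might realloc it. A sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet { char *data; size_t len; };

/* May grow (and therefore move) pkt->data, much like pskb_may_pull() can. */
static int ensure_room(struct packet *pkt, size_t need)
{
    char *p;

    if (pkt->len >= need)
        return 0;
    p = realloc(pkt->data, need);
    if (!p)
        return -1;
    memset(p + pkt->len, 0, need - pkt->len);
    pkt->data = p;
    pkt->len = need;
    return 0;
}

int main(void)
{
    struct packet pkt = { .data = malloc(14), .len = 14 };
    char *eth;

    memcpy(pkt.data, "\xaa\xbb\xcc\xdd\xee\xff", 6);
    eth = pkt.data;                    /* pointer into the buffer */

    if (ensure_room(&pkt, 4096) == 0)
        eth = pkt.data;                /* must be refreshed: the buffer may have moved */

    printf("first byte: 0x%02x\n", (unsigned char)eth[0]);
    free(pkt.data);
    return 0;
}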
} else { if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { - ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); + ret = batadv_gw_out_of_range(bat_priv, skb); if (ret) goto dropped; } diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index dc8b5d4dd636..688a0419756b 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size, * @skb: the skb containing the payload to encapsulate * @orig_node: the destination node * - * Returns false if the payload could not be encapsulated or true otherwise + * Returns false if the payload could not be encapsulated or true otherwise. + * + * This call might reallocate skb data. */ static bool batadv_unicast_prepare_skb(struct sk_buff *skb, struct batadv_orig_node *orig_node) @@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb, * @orig_node: the destination node * @packet_subtype: the batman 4addr packet subtype to use * - * Returns false if the payload could not be encapsulated or true otherwise + * Returns false if the payload could not be encapsulated or true otherwise. + * + * This call might reallocate skb data. */ bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, struct sk_buff *skb, @@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv, struct batadv_neigh_node *neigh_node; int data_len = skb->len; int ret = NET_RX_DROP; - unsigned int dev_mtu; + unsigned int dev_mtu, header_len; /* get routing information */ if (is_multicast_ether_addr(ethhdr->h_dest)) { @@ -429,10 +433,12 @@ find_router: switch (packet_type) { case BATADV_UNICAST: batadv_unicast_prepare_skb(skb, orig_node); + header_len = sizeof(struct batadv_unicast_packet); break; case BATADV_UNICAST_4ADDR: batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, packet_subtype); + header_len = sizeof(struct batadv_unicast_4addr_packet); break; default: /* this function supports UNICAST and UNICAST_4ADDR only. 
It @@ -441,6 +447,7 @@ find_router: goto out; } + ethhdr = (struct ethhdr *)(skb->data + header_len); unicast_packet = (struct batadv_unicast_packet *)skb->data; /* inform the destination node that we are still missing a correct route diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 61c5e819380e..08e576ada0b2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); if (max_delay) group = &mld->mld_mca; - } else if (skb->len >= sizeof(*mld2q)) { + } else { if (!pskb_may_pull(skb, sizeof(*mld2q))) { err = -EINVAL; goto out; diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 394bb96b6087..3b9637fb7939 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -1,5 +1,5 @@ /* - * Sysfs attributes of bridge ports + * Sysfs attributes of bridge * Linux ethernet bridge * * Authors: diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ade9ff1c1980..159737cac76c 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -65,6 +65,7 @@ ipv6: nhoff += sizeof(struct ipv6hdr); break; } + case __constant_htons(ETH_P_8021AD): case __constant_htons(ETH_P_8021Q): { const struct vlan_hdr *vlan; struct vlan_hdr _vlan; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 242084edb658..2a0e21de3060 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2179,7 +2179,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm, /* If aging addresses are supported device will need to * implement its own handler for this. */ - if (ndm->ndm_state & NUD_PERMANENT) { + if (!(ndm->ndm_state & NUD_PERMANENT)) { pr_info("%s: FDB only supports static addresses\n", dev->name); return -EINVAL; } @@ -2407,7 +2407,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) struct nlattr *extfilt; u32 filter_mask = 0; - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), + extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), IFLA_EXT_MASK); if (extfilt) filter_mask = nla_get_u32(extfilt); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index ab3d814bc80a..109ee89f123e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) } return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - - net_adj) & ~(align - 1)) + (net_adj - 2); + net_adj) & ~(align - 1)) + net_adj - 2; } static void esp4_err(struct sk_buff *skb, u32 info) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 108a1e9c9eac..3df6d3edb2a1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -71,7 +71,6 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> -#include <linux/prefetch.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/ip.h> @@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) if (!c) continue; - if (IS_LEAF(c)) { - prefetch(rcu_dereference_rtnl(p->child[idx])); + if (IS_LEAF(c)) return (struct leaf *) c; - } /* Rescan start scanning in new node */ p = (struct tnode *) c; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index bc3a76521deb..d7aea4c5b940 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, if (daddr) memcpy(&iph->daddr, daddr, 4); if (iph->daddr) - return t->hlen; + return t->hlen + 
sizeof(*iph); return -(t->hlen + sizeof(*iph)); } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 7167b08977df..850525b34899 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, iph->daddr = dst; iph->saddr = src; iph->ttl = ttl; - tunnel_ip_select_ident(skb, - (const struct iphdr *)skb_inner_network_header(skb), - &rt->dst); + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); err = ip_local_out(skb); if (unlikely(net_xmit_eval(err))) diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 5f5fa612647f..4a0335854b89 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -278,7 +278,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), - SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), + SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9077f441cb2..b6ae92a51f58 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -206,8 +206,8 @@ static u32 cubic_root(u64 a) */ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) { - u64 offs; - u32 delta, t, bic_target, max_cnt; + u32 delta, bic_target, max_cnt; + u64 offs, t; ca->ack_cnt++; /* count the number of ACKs */ @@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) * if the cwnd < 1 million packets !!! */ + t = (s32)(tcp_time_stamp - ca->epoch_start); + t += msecs_to_jiffies(ca->delay_min >> 3); /* change the unit from HZ to bictcp_HZ */ - t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) - - ca->epoch_start) << BICTCP_HZ) / HZ; + t <<= BICTCP_HZ; + do_div(t, HZ); if (t < ca->bic_K) /* t - K */ offs = ca->bic_K - t; @@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) return; /* Discard delay samples right after fast recovery */ - if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) + if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) return; delay = (rtt_us << 3) / USEC_PER_MSEC; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 40ffd72243a4..aeac0dc3635d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) net_adj = 0; return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - - net_adj) & ~(align - 1)) + (net_adj - 2); + net_adj) & ~(align - 1)) + net_adj - 2; } static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index ed828d6f37b2..73db48eba1c4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { #ifdef CONFIG_IPV6_SUBTREES - if (fn->subtree) - fn = fib6_lookup_1(fn->subtree, args + 1); + if (fn->subtree) { + struct fib6_node *sfn; + sfn = fib6_lookup_1(fn->subtree, + args + 1); + if (!sfn) + goto backtrack; + fn = sfn; + } #endif - if (!fn || fn->fn_flags & RTN_RTINFO) + if (fn->fn_flags & RTN_RTINFO) return fn; } } - +#ifdef CONFIG_IPV6_SUBTREES +backtrack: +#endif if (fn->fn_flags & RTN_ROOT) break; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c 
b/net/netfilter/nf_conntrack_proto_tcp.c index 7dcc376eea5f..2f8010707d01 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct, const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; __u32 seq, ack, sack, end, win, swin; s16 receiver_offset; - bool res; + bool res, in_recv_win; /* * Get the required data from the packet. @@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct, receiver->td_end, receiver->td_maxend, receiver->td_maxwin, receiver->td_scale); + /* Is the ending sequence in the receive window (if available)? */ + in_recv_win = !receiver->td_maxwin || + after(end, sender->td_end - receiver->td_maxwin - 1); + pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", before(seq, sender->td_maxend + 1), - after(end, sender->td_end - receiver->td_maxwin - 1), + (in_recv_win ? 1 : 0), before(sack, receiver->td_end + 1), after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); if (before(seq, sender->td_maxend + 1) && - after(end, sender->td_end - receiver->td_maxwin - 1) && + in_recv_win && before(sack, receiver->td_end + 1) && after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { /* @@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct, nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_tcp: %s ", before(seq, sender->td_maxend + 1) ? - after(end, sender->td_end - receiver->td_maxwin - 1) ? + in_recv_win ? before(sack, receiver->td_end + 1) ? after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" : "ACK is under the lower bound (possible overly delayed ACK)" diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 962e9792e317..d92cc317bf8b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(inst->group_num); + memset(&pmsg, 0, sizeof(pmsg)); pmsg.hw_protocol = skb->protocol; pmsg.hook = hooknum; @@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log, if (indev && skb->dev && skb->mac_header != skb->network_header) { struct nfulnl_msg_packet_hw phw; - int len = dev_parse_header(skb, phw.hw_addr); + int len; + + memset(&phw, 0, sizeof(phw)); + len = dev_parse_header(skb, phw.hw_addr); if (len > 0) { phw.hw_addrlen = htons(len); if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 971ea145ab3e..8a703c3dd318 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (indev && entskb->dev && entskb->mac_header != entskb->network_header) { struct nfqnl_msg_packet_hw phw; - int len = dev_parse_header(entskb, phw.hw_addr); + int len; + + memset(&phw, 0, sizeof(phw)); + len = dev_parse_header(entskb, phw.hw_addr); if (len) { phw.hw_addrlen = htons(len); if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 7011c71646f0..6113cc7efffc 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, { const struct xt_tcpmss_info *info = par->targinfo; struct tcphdr *tcph; - unsigned int tcplen, i; + int len, tcp_hdrlen; + unsigned int i; __be16 oldval; u16 newmss; u8 *opt; @@ -64,11 +65,14 
@@ tcpmss_mangle_packet(struct sk_buff *skb, if (!skb_make_writable(skb, skb->len)) return -1; - tcplen = skb->len - tcphoff; + len = skb->len - tcphoff; + if (len < (int)sizeof(struct tcphdr)) + return -1; + tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); + tcp_hdrlen = tcph->doff * 4; - /* Header cannot be larger than the packet */ - if (tcplen < tcph->doff*4) + if (len < tcp_hdrlen) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, newmss = info->mss; opt = (u_int8_t *)tcph; - for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { - if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && - opt[i+1] == TCPOLEN_MSS) { + for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { + if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { u_int16_t oldmss; oldmss = (opt[i+2] << 8) | opt[i+3]; @@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, } /* There is data after the header so the option can't be added - without moving it, and doing so may make the SYN packet - itself too large. Accept the packet unmodified instead. */ - if (tcplen > tcph->doff*4) + * without moving it, and doing so may make the SYN packet + * itself too large. Accept the packet unmodified instead. + */ + if (len > tcp_hdrlen) return 0; /* @@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, newmss = min(newmss, (u16)1220); opt = (u_int8_t *)tcph + sizeof(struct tcphdr); - memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); + memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); inet_proto_csum_replace2(&tcph->check, skb, - htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); + htons(len), htons(len + TCPOLEN_MSS), 1); opt[0] = TCPOPT_MSS; opt[1] = TCPOLEN_MSS; opt[2] = (newmss & 0xff00) >> 8; diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index b68fa191710f..625fa1d636a0 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c @@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, struct tcphdr *tcph; u_int16_t n, o; u_int8_t *opt; - int len; + int len, tcp_hdrlen; /* This is a fragment, no TCP header is available */ if (par->fragoff != 0) @@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, return NF_DROP; tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); - if (tcph->doff * 4 > len) + tcp_hdrlen = tcph->doff * 4; + + if (len < tcp_hdrlen) return NF_DROP; opt = (u_int8_t *)tcph; @@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, * Walk through all TCP options - if we find some option to remove, * set all octets to %TCPOPT_NOP and adjust checksum. 
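The xt_TCPMSS and xt_TCPOPTSTRIP fixes are all about validating lengths before walking TCP options: the segment must hold at least a full TCP header, the header length derived from doff must fit inside the segment, and the option loop must stop while a complete option still fits. A self-contained sketch of such a bounds-checked option walk over a plain byte buffer (constants and layout simplified for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define TCPOPT_EOL  0
#define TCPOPT_NOP  1
#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4
#define TCP_HDR_MIN 20

/* Returns the MSS option value, or -1 if absent or malformed. */
static int find_mss(const uint8_t *seg, size_t len, size_t tcp_hdrlen)
{
    if (len < TCP_HDR_MIN || tcp_hdrlen < TCP_HDR_MIN || len < tcp_hdrlen)
        return -1;

    for (size_t i = TCP_HDR_MIN; i + TCPOLEN_MSS <= tcp_hdrlen; ) {
        uint8_t kind = seg[i];

        if (kind == TCPOPT_EOL)
            break;
        if (kind == TCPOPT_NOP) {
            i++;
            continue;
        }
        if (seg[i + 1] < 2 || i + seg[i + 1] > tcp_hdrlen)
            return -1;                         /* option would overrun the header */
        if (kind == TCPOPT_MSS && seg[i + 1] == TCPOLEN_MSS)
            return (seg[i + 2] << 8) | seg[i + 3];
        i += seg[i + 1];
    }
    return -1;
}

int main(void)
{
    uint8_t seg[24] = { 0 };

    seg[20] = TCPOPT_MSS; seg[21] = TCPOLEN_MSS; seg[22] = 0x05; seg[23] = 0xb4;  /* MSS 1460 */
    printf("mss = %d\n", find_mss(seg, sizeof(seg), 24));
    return 0;
}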
*/ - for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { + for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) { optl = optlen(opt, i); - if (i + optl > tcp_hdrlen(skb)) + if (i + optl > tcp_hdrlen) break; if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 512718adb0d5..f85f8a2ad6cf 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) struct net *net = sock_net(skb->sk); int chains_to_skip = cb->args[0]; int fams_to_skip = cb->args[1]; + bool need_locking = chains_to_skip || fams_to_skip; + + if (need_locking) + genl_lock(); for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) { n = 0; @@ -810,6 +814,9 @@ errout: cb->args[0] = i; cb->args[1] = n; + if (need_locking) + genl_unlock(); + return skb->len; } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 22c5f399f1cf..ab101f715447 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) { struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + OVS_CB(skb)->tun_key = NULL; return do_execute_actions(dp, skb, acts->actions, acts->actions_len, false); } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f7e3a0d84c40..f2ed7600084e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) ovs_notify(reply, info, &ovs_dp_vport_multicast_group); return 0; - rtnl_unlock(); - return 0; - exit_free: kfree_skb(reply); exit_unlock: diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 5c519b121e1b..1aa84dc58777 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets) struct flex_array *buckets; int i, err; - buckets = flex_array_alloc(sizeof(struct hlist_head *), + buckets = flex_array_alloc(sizeof(struct hlist_head), n_buckets, GFP_KERNEL); if (!buckets) return NULL; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 281c1bded1f6..51b968d3febb 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) return q; } +/* The linklayer setting were not transferred from iproute2, in older + * versions, and the rate tables lookup systems have been dropped in + * the kernel. To keep backward compatible with older iproute2 tc + * utils, we detect the linklayer setting by detecting if the rate + * table were modified. + * + * For linklayer ATM table entries, the rate table will be aligned to + * 48 bytes, thus some table entries will contain the same value. The + * mpu (min packet unit) is also encoded into the old rate table, thus + * starting from the mpu, we find low and high table entries for + * mapping this cell. If these entries contain the same value, when + * the rate tables have been modified for linklayer ATM. + * + * This is done by rounding mpu to the nearest 48 bytes cell/entry, + * and then roundup to the next cell, calc the table entry one below, + * and compare. 
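The comment here describes how sch_api now infers an ATM link layer from an old-style iproute2 rate table: round the mpu up to a 48-byte cell, round up once more to the next cell, convert both byte counts into table indices with cell_log, and if both indices hold the same rate value the table was ATM-aligned. The index arithmetic on its own, with made-up mpu and cell_log values just to show the shape:

#include <stdio.h>

#define ATM_CELL 48

static unsigned int round_up(unsigned int x, unsigned int to)
{
    return ((x + to - 1) / to) * to;
}

int main(void)
{
    unsigned int mpu = 0, cell_log = 3;                      /* illustrative values only */

    unsigned int low  = round_up(mpu, ATM_CELL);             /* cell boundary at/after mpu */
    unsigned int high = round_up(low + 1, ATM_CELL);         /* the next cell boundary */
    unsigned int cell_low  = low >> cell_log;                /* rate-table slot for "low" bytes */
    unsigned int cell_high = (high >> cell_log) - 1;         /* last slot inside that next cell */

    printf("low=%u high=%u -> compare rtab[%u] with rtab[%u]\n",
           low, high, cell_low, cell_high);
    /* An ATM-shaped table would hold the same rate value in both slots. */
    return 0;
}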
+ */ +static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) +{ + int low = roundup(r->mpu, 48); + int high = roundup(low+1, 48); + int cell_low = low >> r->cell_log; + int cell_high = (high >> r->cell_log) - 1; + + /* rtab is too inaccurate at rates > 100Mbit/s */ + if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { + pr_debug("TC linklayer: Giving up ATM detection\n"); + return TC_LINKLAYER_ETHERNET; + } + + if ((cell_high > cell_low) && (cell_high < 256) + && (rtab[cell_low] == rtab[cell_high])) { + pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", + cell_low, cell_high, rtab[cell_high]); + return TC_LINKLAYER_ATM; + } + return TC_LINKLAYER_ETHERNET; +} + static struct qdisc_rate_table *qdisc_rtab_list; struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) @@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta rtab->rate = *r; rtab->refcnt = 1; memcpy(rtab->data, nla_data(tab), 1024); + if (r->linklayer == TC_LINKLAYER_UNAWARE) + r->linklayer = __detect_linklayer(r, rtab->data); rtab->next = qdisc_rtab_list; qdisc_rtab_list = rtab; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 4626cef4b76e..48be3d5c0d92 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -25,6 +25,7 @@ #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/slab.h> +#include <linux/if_vlan.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> #include <net/dst.h> @@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q) unsigned long dev_trans_start(struct net_device *dev) { - unsigned long val, res = dev->trans_start; + unsigned long val, res; unsigned int i; + if (is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); + res = dev->trans_start; for (i = 0; i < dev->num_tx_queues; i++) { val = netdev_get_tx_queue(dev, i)->trans_start; if (val && time_after(val, res)) res = val; } dev->trans_start = res; + return res; } EXPORT_SYMBOL(dev_trans_start); @@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; r->rate_bytes_ps = conf->rate; + r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); r->mult = 1; /* * The deal here is to replace a divide by a reciprocal one diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 45e751527dfc..c2178b15ca6e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = (struct htb_class *)*arg, *parent; struct nlattr *opt = tca[TCA_OPTIONS]; + struct qdisc_rate_table *rtab = NULL, *ctab = NULL; struct nlattr *tb[TCA_HTB_MAX + 1]; struct tc_htb_opt *hopt; @@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, if (!hopt->rate.rate || !hopt->ceil.rate) goto failure; + /* Keeping backward compatible with rate_table based iproute2 tc */ + if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { + rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); + if (rtab) + qdisc_put_rtab(rtab); + } + if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { + ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); + if (ctab) + qdisc_put_rtab(ctab); + } + if (!cl) { /* new class */ struct Qdisc *new_q; int prio; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index e051920816e6..cef509985192 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -840,12 +840,12 @@ void 
sctp_assoc_control_transport(struct sctp_association *asoc, else spc_state = SCTP_ADDR_AVAILABLE; /* Don't inform ULP about transition from PF to - * active state and set cwnd to 1, see SCTP + * active state and set cwnd to 1 MTU, see SCTP * Quick failover draft section 5.1, point 5 */ if (transport->state == SCTP_PF) { ulp_notify = false; - transport->cwnd = 1; + transport->cwnd = asoc->pathmtu; } transport->state = SCTP_ACTIVE; break; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 6836ead5c957..e332efb124cc 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -175,12 +175,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport) return; } - call_rcu(&transport->rcu, sctp_transport_destroy_rcu); - sctp_packet_free(&transport->packet); if (transport->asoc) sctp_association_put(transport->asoc); + + call_rcu(&transport->rcu, sctp_transport_destroy_rcu); } /* Start T3_rtx timer if it is not already running and update the heartbeat diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 74f6a704e374..ecbc4e3d83ad 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task) task->tk_action = call_connect_status; if (task->tk_status < 0) return; + if (task->tk_flags & RPC_TASK_NOCONNECT) { + rpc_exit(task, -ENOTCONN); + return; + } xprt_connect(task); } } diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h index 74d948f5d5a1..779742cfc1ff 100644 --- a/net/sunrpc/netns.h +++ b/net/sunrpc/netns.h @@ -23,6 +23,7 @@ struct sunrpc_net { struct rpc_clnt *rpcb_local_clnt4; spinlock_t rpcb_clnt_lock; unsigned int rpcb_users; + unsigned int rpcb_is_af_local : 1; struct mutex gssp_lock; wait_queue_head_t gssp_wq; diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 3df764dc330c..1891a1022c17 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net) } static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt, - struct rpc_clnt *clnt4) + struct rpc_clnt *clnt4, + bool is_af_local) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); /* Protected by rpcb_create_local_mutex */ sn->rpcb_local_clnt = clnt; sn->rpcb_local_clnt4 = clnt4; + sn->rpcb_is_af_local = is_af_local ? 1 : 0; smp_wmb(); sn->rpcb_users = 1; dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " @@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net) .program = &rpcb_program, .version = RPCBVERS_2, .authflavor = RPC_AUTH_NULL, + /* + * We turn off the idle timeout to prevent the kernel + * from automatically disconnecting the socket. + * Otherwise, we'd have to cache the mount namespace + * of the caller and somehow pass that to the socket + * reconnect code. 
+ */ + .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, }; struct rpc_clnt *clnt, *clnt4; int result = 0; @@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net) clnt4 = NULL; } - rpcb_set_local(net, clnt, clnt4); + rpcb_set_local(net, clnt, clnt4, true); out: return result; @@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net) clnt4 = NULL; } - rpcb_set_local(net, clnt, clnt4); + rpcb_set_local(net, clnt, clnt4, false); out: return result; @@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname, return rpc_create(&args); } -static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg) +static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set) { - int result, error = 0; + int flags = RPC_TASK_NOCONNECT; + int error, result = 0; + if (is_set || !sn->rpcb_is_af_local) + flags = RPC_TASK_SOFTCONN; msg->rpc_resp = &result; - error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN); + error = rpc_call_sync(clnt, msg, flags); if (error < 0) { dprintk("RPC: failed to contact local rpcbind " "server (errno %d).\n", -error); @@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short .rpc_argp = &map, }; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); + bool is_set = false; dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " "rpcbind\n", (port ? "" : "un"), prog, vers, prot, port); msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET]; - if (port) + if (port != 0) { msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; + is_set = true; + } - return rpcb_register_call(sn->rpcb_local_clnt, &msg); + return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set); } /* @@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn, const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; struct rpcbind_args *map = msg->rpc_argp; unsigned short port = ntohs(sin->sin_port); + bool is_set = false; int result; map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); @@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn, map->r_addr, map->r_netid); msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; - if (port) + if (port != 0) { msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; + is_set = true; + } - result = rpcb_register_call(sn->rpcb_local_clnt4, msg); + result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set); kfree(map->r_addr); return result; } @@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn, const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; struct rpcbind_args *map = msg->rpc_argp; unsigned short port = ntohs(sin6->sin6_port); + bool is_set = false; int result; map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); @@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn, map->r_addr, map->r_netid); msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; - if (port) + if (port != 0) { msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; + is_set = true; + } - result = rpcb_register_call(sn->rpcb_local_clnt4, msg); + result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set); kfree(map->r_addr); return result; } @@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn, map->r_addr = ""; msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; - return rpcb_register_call(sn->rpcb_local_clnt4, msg); + return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false); } /** diff --git a/net/tipc/bearer.c 
b/net/tipc/bearer.c index cb29ef7ba2f0..609c30c80816 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr) { struct tipc_link *l_ptr; struct tipc_link *temp_l_ptr; + struct tipc_link_req *temp_req; pr_info("Disabling bearer <%s>\n", b_ptr->name); spin_lock_bh(&b_ptr->lock); @@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr) list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { tipc_link_delete(l_ptr); } - if (b_ptr->link_req) - tipc_disc_delete(b_ptr->link_req); + temp_req = b_ptr->link_req; + b_ptr->link_req = NULL; spin_unlock_bh(&b_ptr->lock); + + if (temp_req) + tipc_disc_delete(temp_req); + memset(b_ptr, 0, sizeof(struct tipc_bearer)); } diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 57896eee1c25..545c08b8a1d4 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -346,7 +346,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], - connected_table); + connected_table) fn(sk_vsock(vsk)); } diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 3f7682a387b7..eefbd10e408f 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -1998,12 +1998,11 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) * * Create or update the port list entry */ -static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address, +static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, int act) { __be16 *bep; __be32 *be32p; - struct sockaddr_in6 *addr6; struct smk_port_label *spp; struct socket_smack *ssp = sk->sk_security; struct smack_known *skp; @@ -2025,10 +2024,9 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address, /* * Get the IP address and port from the address. */ - addr6 = (struct sockaddr_in6 *)address; - port = ntohs(addr6->sin6_port); - bep = (__be16 *)(&addr6->sin6_addr); - be32p = (__be32 *)(&addr6->sin6_addr); + port = ntohs(address->sin6_port); + bep = (__be16 *)(&address->sin6_addr); + be32p = (__be32 *)(&address->sin6_addr); /* * It's remote, so port lookup does no good. 
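The tipc bearer_disable() change detaches link_req while b_ptr->lock is held, clears the field, and only calls tipc_disc_delete() after dropping the lock, presumably so the deletion does not run under the bearer spinlock. That detach-then-destroy ordering is a common pattern; a generic userspace sketch with a pthread mutex (names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int id; };

struct bearer {
    pthread_mutex_t lock;
    struct request *req;      /* protected by lock */
};

static void disable_bearer(struct bearer *b)
{
    struct request *tmp;

    pthread_mutex_lock(&b->lock);
    tmp = b->req;             /* detach under the lock ... */
    b->req = NULL;
    pthread_mutex_unlock(&b->lock);

    if (tmp) {                /* ... destroy after dropping it */
        printf("deleting request %d outside the lock\n", tmp->id);
        free(tmp);
    }
}

int main(void)
{
    struct bearer b = { .req = NULL };

    pthread_mutex_init(&b.lock, NULL);
    b.req = malloc(sizeof(*b.req));
    b.req->id = 42;
    disable_bearer(&b);
    pthread_mutex_destroy(&b.lock);
    return 0;
}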
@@ -2060,9 +2058,9 @@ auditout:
 	ad.a.u.net->family = sk->sk_family;
 	ad.a.u.net->dport = port;
 	if (act == SMK_RECEIVING)
-		ad.a.u.net->v6info.saddr = addr6->sin6_addr;
+		ad.a.u.net->v6info.saddr = address->sin6_addr;
 	else
-		ad.a.u.net->v6info.daddr = addr6->sin6_addr;
+		ad.a.u.net->v6info.daddr = address->sin6_addr;
 #endif
 	return smk_access(skp, object, MAY_WRITE, &ad);
 }
@@ -2201,7 +2199,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
 	case PF_INET6:
 		if (addrlen < sizeof(struct sockaddr_in6))
 			return -EINVAL;
-		rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING);
+		rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+					SMK_CONNECTING);
 		break;
 	}
 	return rc;
@@ -3034,7 +3033,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
 				int size)
 {
 	struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
-	struct sockaddr *sap = (struct sockaddr *) msg->msg_name;
+	struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
 	int rc = 0;
 	/*
@@ -3121,9 +3120,8 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
 	return smack_net_ambient;
 }
-static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap)
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
 {
-	struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
 	u8 nexthdr;
 	int offset;
 	int proto = -EINVAL;
@@ -3181,7 +3179,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	struct netlbl_lsm_secattr secattr;
 	struct socket_smack *ssp = sk->sk_security;
 	struct smack_known *skp;
-	struct sockaddr sadd;
+	struct sockaddr_in6 sadd;
 	int rc = 0;
 	struct smk_audit_info ad;
 #ifdef CONFIG_AUDIT
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99db892d7299..98969541cbcc 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -743,7 +743,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 	mutex_lock(&stream->device->lock);
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
-		put_user(SNDRV_COMPRESS_VERSION,
+		retval = put_user(SNDRV_COMPRESS_VERSION,
 				(int __user *)arg) ? -EFAULT : 0;
 		break;
 	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7c11d46b84d3..48a9d004d6d9 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -860,7 +860,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
 		}
 	}
 	if (id < 0 && quirk) {
-		for (q = quirk; q->subvendor; q++) {
+		for (q = quirk; q->subvendor || q->subdevice; q++) {
 			unsigned int vendorid = q->subdevice | (q->subvendor << 16);
 			unsigned int mask = 0xffff0000 | q->subdevice_mask;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbbad871..e3c7ba8d7582 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
 }
 #define nid_has_mute(codec, nid, dir) \
-	check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+	check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
 #define nid_has_volume(codec, nid, dir) \
 	check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
 		if (enable)
 			val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
 	}
-	if (caps & AC_AMPCAP_MUTE) {
+	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
 		if (!enable)
 			val |= HDA_AMP_MUTE;
 	}
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
 {
 	unsigned int mask = 0xff;
-	if (caps & AC_AMPCAP_MUTE) {
+	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
 		if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
 			mask &= ~0x80;
 	}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd226149868..f303cd898515 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1031,6 +1031,7 @@ enum {
 	ALC880_FIXUP_GPIO2,
 	ALC880_FIXUP_MEDION_RIM,
 	ALC880_FIXUP_LG,
+	ALC880_FIXUP_LG_LW25,
 	ALC880_FIXUP_W810,
 	ALC880_FIXUP_EAPD_COEF,
 	ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = {
 			{ }
 		}
 	},
+	[ALC880_FIXUP_LG_LW25] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1a, 0x0181344f }, /* line-in */
+			{ 0x1b, 0x0321403f }, /* headphone */
+			{ }
+		}
+	},
 	[ALC880_FIXUP_W810] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
 	SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
 	SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+	SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
 	SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
 	/* Below is the copied entries from alc880_quirks.c.
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+	SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 92b9b4324372..6d1924c19abf 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2819,6 +2819,7 @@ static const struct hda_pintbl ecs202_pin_configs[] = {
 /* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
 static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
 	SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
 	SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
 	SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2),
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index d6f7694fcad4..c8a2de103c5f 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -341,7 +341,7 @@ static struct platform_driver au1xac97c_driver = {
 	.remove = au1xac97c_drvremove,
 };
-module_platform_driver(&au1xac97c_driver);
+module_platform_driver(au1xac97c_driver);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Au1000/1500/1100 AC97C ASoC driver");
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index efb1daecd0dd..e82eb373a731 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -294,11 +294,12 @@ static int asoc_bfin_ac97_probe(struct platform_device *pdev)
 	/* Request PB3 as reset pin */
 	ret = devm_gpio_request_one(&pdev->dev, CONFIG_SND_BF5XX_RESET_GPIO_NUM,
-			GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET") {
+			GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET");
+	if (ret) {
 		dev_err(&pdev->dev,
 			"Failed to request GPIO_%d for reset: %d\n",
 			CONFIG_SND_BF5XX_RESET_GPIO_NUM, ret);
-		goto gpio_err;
+		return ret;
 	}
 #endif
diff --git a/sound/soc/blackfin/bf5xx-ac97.h b/sound/soc/blackfin/bf5xx-ac97.h
index 15c635e33f4d..0c3e22d90a8d 100644
--- a/sound/soc/blackfin/bf5xx-ac97.h
+++ b/sound/soc/blackfin/bf5xx-ac97.h
@@ -9,7 +9,6 @@
 #ifndef _BF5XX_AC97_H
 #define _BF5XX_AC97_H
-extern struct snd_ac97_bus_ops bf5xx_ac97_ops;
 extern struct snd_ac97 *ac97;
 /* Frame format in memory, only support stereo currently */
 struct ac97_frame {
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728718c5..be2ba1b6fe4a 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
+
 static const unsigned int limiter_tlv[] = {
 	TLV_DB_RANGE_HEAD(2),
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 	SOC_ENUM("Beep Pitch", beep_pitch_enum),
 	SOC_ENUM("Beep on Time", beep_ontime_enum),
 	SOC_ENUM("Beep off Time", beep_offtime_enum),
-	SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+	SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
+			0, 0x07, 0x1f, beep_tlv),
 	SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
 	SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
 	SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6c8a9e7bee25..760e8bfeacaa 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
 static int power_vag_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
+	const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
 		snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
-			SGTL5000_VAG_POWERUP, 0);
-		msleep(400);
+		/*
+		 * Don't clear VAG_POWERUP, when both DAC and ADC are
+		 * operational to prevent inadvertently starving the
+		 * other one of them.
+		 */
+		if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+				mask) != mask) {
+			snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+				SGTL5000_VAG_POWERUP, 0);
+			msleep(400);
+		}
 		break;
 	default:
 		break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
 	SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
 	SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
 			SGTL5000_CHIP_ANA_ADC_CTRL,
-			8, 2, 0, capture_6db_attenuate),
+			8, 1, 0, capture_6db_attenuate),
 	SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
 	SOC_DOUBLE_TLV("Headphone Playback Volume",
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f5e835662cdc..10adc4145d46 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -410,6 +410,16 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
 			rec->command, rec->length);
 		len = rec->length + 8;
+		xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
+		if (!xfer) {
+			dev_err(codec->dev, "Failed to allocate xfer\n");
+			ret = -ENOMEM;
+			goto abort;
+		}
+
+		xfer->codec = codec;
+		list_add_tail(&xfer->list, &xfer_list);
+
 		out = kzalloc(len, GFP_KERNEL);
 		if (!out) {
 			dev_err(codec->dev,
@@ -417,6 +427,7 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
 			ret = -ENOMEM;
 			goto abort1;
 		}
+		xfer->t.rx_buf = out;
 		img = kzalloc(len, GFP_KERNEL);
 		if (!img) {
@@ -425,24 +436,13 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
 			ret = -ENOMEM;
 			goto abort1;
 		}
+		xfer->t.tx_buf = img;
 		byte_swap_64((u64 *)&rec->command, img, len);
-		xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
-		if (!xfer) {
-			dev_err(codec->dev, "Failed to allocate xfer\n");
-			ret = -ENOMEM;
-			goto abort1;
-		}
-
-		xfer->codec = codec;
-		list_add_tail(&xfer->list, &xfer_list);
-
 		spi_message_init(&xfer->m);
 		xfer->m.complete = wm0010_boot_xfer_complete;
 		xfer->m.context = xfer;
-		xfer->t.tx_buf = img;
-		xfer->t.rx_buf = out;
 		xfer->t.len = len;
 		xfer->t.bits_per_word = 8;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b94190820e8c..4375c9f2b791 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 		return -EINVAL;
 	}
-	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-				list_sink);
-	if (!path) {
+	if (list_empty(&w->sources)) {
 		dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
 		return -EINVAL;
 	}
+	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+				list_sink);
+
 	ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
 	if (ret < 0)
 		return ret;
@@ -2733,7 +2734,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
 	}
 	mutex_unlock(&card->dapm_mutex);
-	return 0;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
@@ -2861,7 +2862,6 @@ int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	int change;
-	int ret = 0;
 	int wi;
 	if (ucontrol->value.enumerated.item[0] >= e->max)
@@ -2881,7 +2881,7 @@ int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
 	}
 	mutex_unlock(&card->dapm_mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146cad61f..47565fd04505 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
 		reg = TEGRA30_I2S_CIF_RX_CTRL;
 	} else {
 		val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
-		reg = TEGRA30_I2S_CIF_RX_CTRL;
+		reg = TEGRA30_I2S_CIF_TX_CTRL;
 	}
 	regmap_write(i2s->regmap, reg, val);
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 9e6e3ffd86bb..23452ee617e1 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
 static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
 		u8 reg, u8 value)
 {
-	u8 buffer[13]; /* 13: maximum length of message */
+	u8 *buffer;
+	int ret;
+
+	/* 13: maximum length of message */
+	buffer = kmalloc(13, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
 	usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
-	return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+	ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+	kfree(buffer);
+	return ret;
 }
 static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
 		u8 reg, u8 vl, u8 vh)
 {
-	u8 buffer[13]; /* 13: maximum length of message */
+	u8 *buffer;
+	int ret;
+
+	/* 13: maximum length of message */
+	buffer = kmalloc(13, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
 	usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
-	return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+	ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+	kfree(buffer);
+	return ret;
 }
 int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip)
 	if (!rt)
 		return -ENOMEM;
+	rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
+	if (!rt->receiver_buffer) {
+		kfree(rt);
+		return -ENOMEM;
+	}
+
 	urb = &rt->receiver;
 	rt->serial = 1;
 	rt->chip = chip;
@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip)
 	urb->interval = 1;
 	ret = usb_submit_urb(urb, GFP_KERNEL);
 	if (ret < 0) {
+		kfree(rt->receiver_buffer);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
 		return ret;
@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
 void usb6fire_comm_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->comm);
+	struct comm_runtime *rt = chip->comm;
+
+	kfree(rt->receiver_buffer);
+	kfree(rt);
 	chip->comm = NULL;
 }
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index 6a0840b0dcff..780d5ed8e5d8 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -24,7 +24,7 @@ struct comm_runtime {
 	struct sfire_chip *chip;
 	struct urb receiver;
-	u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+	u8 *receiver_buffer;
 	u8 serial; /* urb serial */
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 26722423330d..f3dd7266c391 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
 #include "chip.h"
 #include "comm.h"
+enum {
+	MIDI_BUFSIZE = 64
+};
+
 static void usb6fire_midi_out_handler(struct urb *urb)
 {
 	struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
 	if (!rt)
 		return -ENOMEM;
+	rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+	if (!rt->out_buffer) {
+		kfree(rt);
+		return -ENOMEM;
+	}
+
 	rt->chip = chip;
 	rt->in_received = usb6fire_midi_in_received;
 	rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
 	ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
 	if (ret < 0) {
+		kfree(rt->out_buffer);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
 		return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
 void usb6fire_midi_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->midi);
+	struct midi_runtime *rt = chip->midi;
+
+	kfree(rt->out_buffer);
+	kfree(rt);
 	chip->midi = NULL;
 }
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006e5430..84851b9f5559 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
 #include "common.h"
-enum {
-	MIDI_BUFSIZE = 64
-};
-
 struct midi_runtime {
 	struct sfire_chip *chip;
 	struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
 	struct snd_rawmidi_substream *out;
 	struct urb out_urb;
 	u8 out_serial; /* serial number of out packet */
-	u8 out_buffer[MIDI_BUFSIZE];
+	u8 *out_buffer;
 	int buffer_offset;
 	void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 3d2551cc10f2..b5eb97fdc842 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
 	urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
 }
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->out_urbs[i].buffer)
+			return -ENOMEM;
+		rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->in_urbs[i].buffer)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		kfree(rt->out_urbs[i].buffer);
+		kfree(rt->in_urbs[i].buffer);
+	}
+}
+
 int usb6fire_pcm_init(struct sfire_chip *chip)
 {
 	int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	if (!rt)
 		return -ENOMEM;
+	ret = usb6fire_pcm_buffers_init(rt);
+	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
+		kfree(rt);
+		return ret;
+	}
+
 	rt->chip = chip;
 	rt->stream_state = STREAM_DISABLED;
 	rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
 	if (ret < 0) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
 		return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
 	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
 void usb6fire_pcm_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->pcm);
+	struct pcm_runtime *rt = chip->pcm;
+
+	usb6fire_pcm_buffers_destroy(rt);
+	kfree(rt);
 	chip->pcm = NULL;
 }
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133ee3fe..f5779d6182c6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@ struct pcm_urb {
 	struct urb instance;
 	struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
 	/* END DO NOT SEPARATE */
-	u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+	u8 *buffer;
 	struct pcm_urb *peer;
 };
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7a444b5501d9..659950e5b94f 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
 	ep->stride = frame_bits >> 3;
 	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
-	/* calculate max. frequency */
-	if (ep->maxpacksize) {
+	/* assume max. frequency is 25% higher than nominal */
+	ep->freqmax = ep->freqn + (ep->freqn >> 2);
+	maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+			>> (16 - ep->datainterval);
+	/* but wMaxPacketSize might reduce this */
+	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
 		/* whatever fits into a max. size packet */
 		maxsize = ep->maxpacksize;
 		ep->freqmax = (maxsize / (frame_bits >> 3)) << (16 - ep->datainterval);
-	} else {
-		/* no max. packet size: just take 25% higher than nominal */
-		ep->freqmax = ep->freqn + (ep->freqn >> 2);
-		maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
-			>> (16 - ep->datainterval);
 	}
 	if (ep->fill_max)
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5438083fd6a..95558ef4a7a0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
 	case USB_ID(0x046d, 0x0991):
 	/* Most audio usb devices lie about volume resolution.
 	 * Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e71f1fe..0df9ede99dfd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
 	if (altsd->bNumEndpoints < 1)
 		return -ENODEV;
 	epd = get_endpoint(alts, 0);
-	if (!usb_endpoint_xfer_bulk(epd) ||
+	if (!usb_endpoint_xfer_bulk(epd) &&
 	    !usb_endpoint_xfer_int(epd))
 		return -ENODEV;
 	switch (USB_ID_VENDOR(chip->usb_id)) {
 	case 0x0499: /* Yamaha */
 		err = create_yamaha_midi_quirk(chip, iface, driver, alts);
-		if (err < 0 && err != -ENODEV)
+		if (err != -ENODEV)
 			return err;
 		break;
 	case 0x0582: /* Roland */
 		err = create_roland_midi_quirk(chip, iface, driver, alts);
-		if (err < 0 && err != -ENODEV)
+		if (err != -ENODEV)
 			return err;
 		break;
 	}