625 files changed, 16191 insertions, 7137 deletions
@@ -123,6 +123,7 @@ Mark Brown <broonie@sirena.org.uk> Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> +Mathieu Othacehe <m.othacehe@gmail.com> Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com> Matthew Wilcox <willy@infradead.org> <matthew@wil.cx> Matthew Wilcox <willy@infradead.org> <mawilcox@linuxonhyperv.com> @@ -842,10 +842,9 @@ D: ax25-utils maintainer. N: Helge Deller E: deller@gmx.de -E: hdeller@redhat.de -D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver -S: Schimmelsrain 1 -S: D-69231 Rauenberg +W: http://www.parisc-linux.org/ +D: PA-RISC Linux architecture maintainer +D: LASI-, ASP-, WAX-, LCD/LED-driver S: Germany N: Jean Delvare @@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape S: South Africa N: Grant Grundler -E: grundler@parisc-linux.org +E: grantgrundler@gmail.com W: http://obmouse.sourceforge.net/ W: http://www.parisc-linux.org/ D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver @@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206 S: USA N: Kyle McMartin -E: kyle@parisc-linux.org +E: kyle@mcmartin.ca D: Linux/PARISC hacker D: AD1889 sound driver S: Ottawa, Canada @@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct S: Cupertino, CA 95014 S: USA -N: Thibaut Varene -E: T-Bone@parisc-linux.org -W: http://www.parisc-linux.org/~varenet/ -P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063 +N: Thibaut Varène +E: hacks+kernel@slashdirt.org +W: http://hacks.slashdirt.org/ D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there D: AD1889 sound driver -S: Paris, France +S: France N: Heikki Vatiainen E: hessu@cs.tut.fi diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index 0797eec76be1..47e577264198 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst @@ -1,9 +1,9 @@ .. _readme: -Linux kernel release 4.x <http://kernel.org/> +Linux kernel release 5.x <http://kernel.org/> ============================================= -These are the release notes for Linux version 4. Read them carefully, +These are the release notes for Linux version 5. Read them carefully, as they tell you what this is all about, explain how to install the kernel, and what to do if something goes wrong. @@ -63,7 +63,7 @@ Installing the kernel source directory where you have permissions (e.g. your home directory) and unpack it:: - xz -cd linux-4.X.tar.xz | tar xvf - + xz -cd linux-5.x.tar.xz | tar xvf - Replace "X" with the version number of the latest kernel. @@ -72,26 +72,26 @@ Installing the kernel source files. They should match the library, and not get messed up by whatever the kernel-du-jour happens to be. - - You can also upgrade between 4.x releases by patching. Patches are + - You can also upgrade between 5.x releases by patching. Patches are distributed in the xz format. To install by patching, get all the newer patch files, enter the top level directory of the kernel source - (linux-4.X) and execute:: + (linux-5.x) and execute:: - xz -cd ../patch-4.x.xz | patch -p1 + xz -cd ../patch-5.x.xz | patch -p1 - Replace "x" for all versions bigger than the version "X" of your current + Replace "x" for all versions bigger than the version "x" of your current source tree, **in_order**, and you should be ok. 
You may want to remove the backup files (some-file-name~ or some-file-name.orig), and make sure that there are no failed patches (some-file-name# or some-file-name.rej). If there are, either you or I have made a mistake. - Unlike patches for the 4.x kernels, patches for the 4.x.y kernels + Unlike patches for the 5.x kernels, patches for the 5.x.y kernels (also known as the -stable kernels) are not incremental but instead apply - directly to the base 4.x kernel. For example, if your base kernel is 4.0 - and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1 - and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and - want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is, - patch -R) **before** applying the 4.0.3 patch. You can read more on this in + directly to the base 5.x kernel. For example, if your base kernel is 5.0 + and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1 + and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and + want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is, + patch -R) **before** applying the 5.0.3 patch. You can read more on this in :ref:`Documentation/process/applying-patches.rst <applying_patches>`. Alternatively, the script patch-kernel can be used to automate this @@ -114,7 +114,7 @@ Installing the kernel source Software requirements --------------------- - Compiling and running the 4.x kernels requires up-to-date + Compiling and running the 5.x kernels requires up-to-date versions of various software packages. Consult :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers required and how to get updates for these packages. Beware that using @@ -132,12 +132,12 @@ Build directory for the kernel place for the output files (including .config). Example:: - kernel source code: /usr/src/linux-4.X + kernel source code: /usr/src/linux-5.x build directory: /home/name/build/kernel To configure and build the kernel, use:: - cd /usr/src/linux-4.X + cd /usr/src/linux-5.x make O=/home/name/build/kernel menuconfig make O=/home/name/build/kernel sudo make O=/home/name/build/kernel modules_install install diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt index a4b056761eaa..d5f68ac78d15 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt @@ -23,6 +23,20 @@ Required properties: Optional properties: - clock-output-names : Should contain name for output clock. +- rohm,reset-snvs-powered : Transfer BD718x7 to SNVS state at reset. + +The BD718x7 supports two different HW states as reset target states. States +are called as SNVS and READY. At READY state all the PMIC power outputs go +down and OTP is reload. At the SNVS state all other logic and external +devices apart from the SNVS power domain are shut off. Please refer to NXP +i.MX8 documentation for further information regarding SNVS state. When a +reset is done via SNVS state the PMIC OTP data is not reload. This causes +power outputs that have been under SW control to stay down when reset has +switched power state to SNVS. If reset is done via READY state the power +outputs will be returned to HW control by OTP loading. Thus the reset +target state is set to READY by default. If SNVS state is used the boot +crucial regulators must have the regulator-always-on and regulator-boot-on +properties set in regulator node. 
Example: @@ -43,6 +57,7 @@ Example: #clock-cells = <0>; clocks = <&osc 0>; clock-output-names = "bd71837-32k-out"; + rohm,reset-snvs-powered; regulators { buck1: BUCK1 { @@ -50,8 +65,10 @@ Example: regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + regulator-always-on; regulator-ramp-delay = <1250>; }; + // [...] }; }; diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 9201a7d8d7b0..540c65ed9cba 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -15,6 +15,7 @@ Required properties: "fsl,imx6q-usdhc" "fsl,imx6sl-usdhc" "fsl,imx6sx-usdhc" + "fsl,imx6ull-usdhc" "fsl,imx7d-usdhc" "fsl,imx8qxp-usdhc" diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt index f5a0923b34ca..cdbcfd3a4ff2 100644 --- a/Documentation/devicetree/bindings/mmc/mmc.txt +++ b/Documentation/devicetree/bindings/mmc/mmc.txt @@ -62,6 +62,8 @@ Optional properties: be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay waiting for I/O signalling and card power supply to be stable, regardless of whether pwrseq-simple is used. Default to 10ms if no available. +- supports-cqe : The presence of this property indicates that the corresponding + MMC host controller supports HW command queue feature. *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line polarity properties, we have to fix the meaning of the "normal" and "inverted" diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt index 32b4b4e41923..2cecdc71d94c 100644 --- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt @@ -39,12 +39,16 @@ sdhci@c8000200 { bus-width = <8>; }; -Optional properties for Tegra210 and Tegra186: +Optional properties for Tegra210, Tegra186 and Tegra194: - pinctrl-names, pinctrl-0, pinctrl-1 : Specify pad voltage configurations. Valid pinctrl-names are "sdmmc-3v3" and "sdmmc-1v8" for controllers supporting multiple voltage levels. The order of names should correspond to the pin configuration states in pinctrl-0 and pinctrl-1. +- pinctrl-names : "sdmmc-3v3-drv" and "sdmmc-1v8-drv" are applicable for + Tegra210 where pad config registers are in the pinmux register domain + for pull-up-strength and pull-down-strength values configuration when + using pads at 3V3 and 1V8 levels. - nvidia,only-1-8-v : The presence of this property indicates that the controller operates at a 1.8 V fixed I/O voltage. - nvidia,pad-autocal-pull-up-offset-3v3, diff --git a/Documentation/devicetree/bindings/mmc/ti-omap.txt b/Documentation/devicetree/bindings/mmc/ti-omap.txt index 8de579969763..02fd31cf361d 100644 --- a/Documentation/devicetree/bindings/mmc/ti-omap.txt +++ b/Documentation/devicetree/bindings/mmc/ti-omap.txt @@ -24,31 +24,3 @@ Examples: dmas = <&sdma 61 &sdma 62>; dma-names = "tx", "rx"; }; - -* TI MMC host controller for OMAP1 and 2420 - -The MMC Host Controller on TI OMAP1 and 2420 family provides -an interface for MMC, SD, and SDIO types of memory cards. - -This file documents differences between the core properties described -by mmc.txt and the properties used by the omap mmc driver. 
- -Note that this driver will not work with omap2430 or later omaps, -please see the omap hsmmc driver for the current omaps. - -Required properties: -- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers -- ti,hwmods: For 2420, must be "msdi<n>", where n is controller - instance starting 1 - -Examples: - - msdi1: mmc@4809c000 { - compatible = "ti,omap2420-mmc"; - ti,hwmods = "msdi1"; - reg = <0x4809c000 0x80>; - interrupts = <83>; - dmas = <&sdma 61 &sdma 62>; - dma-names = "tx", "rx"; - }; - diff --git a/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt new file mode 100644 index 000000000000..3983c11e062c --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt @@ -0,0 +1,60 @@ +Amlogic NAND Flash Controller (NFC) for GXBB/GXL/AXG family SoCs + +This file documents the properties in addition to those available in +the MTD NAND bindings. + +Required properties: +- compatible : contains one of: + - "amlogic,meson-gxl-nfc" + - "amlogic,meson-axg-nfc" +- clocks : + A list of phandle + clock-specifier pairs for the clocks listed + in clock-names. + +- clock-names: Should contain the following: + "core" - NFC module gate clock + "device" - device clock from eMMC sub clock controller + "rx" - rx clock phase + "tx" - tx clock phase + +- amlogic,mmc-syscon : Required for NAND clocks, it's shared with SD/eMMC + controller port C + +Optional children nodes: +Children nodes represent the available nand chips. + +Other properties: +see Documentation/devicetree/bindings/mtd/nand.txt for generic bindings. + +Example demonstrate on AXG SoC: + + sd_emmc_c_clkc: mmc@7000 { + compatible = "amlogic,meson-axg-mmc-clkc", "syscon"; + reg = <0x0 0x7000 0x0 0x800>; + }; + + nand-controller@7800 { + compatible = "amlogic,meson-axg-nfc"; + reg = <0x0 0x7800 0x0 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>; + + clocks = <&clkc CLKID_SD_EMMC_C>, + <&sd_emmc_c_clkc CLKID_MMC_DIV>, + <&sd_emmc_c_clkc CLKID_MMC_PHASE_RX>, + <&sd_emmc_c_clkc CLKID_MMC_PHASE_TX>; + clock-names = "core", "device", "rx", "tx"; + amlogic,mmc-syscon = <&sd_emmc_c_clkc>; + + pinctrl-names = "default"; + pinctrl-0 = <&nand_pins>; + + nand@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + + nand-on-flash-bbt; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt index bb2075df9b38..4345c3a6f530 100644 --- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt @@ -4,6 +4,7 @@ Required properties: - compatible : should be one of the following: Generic default - "cdns,qspi-nor". For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor". + For TI AM654 SoC - "ti,am654-ospi", "cdns,qspi-nor". - reg : Contains two entries, each of which is a tuple consisting of a physical address and length. The first entry is the address and length of the controller register set. 
The second entry is the diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt index 56d3668e2c50..a12e3b5c495d 100644 --- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt @@ -1,4 +1,4 @@ -* Serial NOR flash controller for MTK MT81xx (and similar) +* Serial NOR flash controller for MediaTek SoCs Required properties: - compatible: For mt8173, compatible should be "mediatek,mt8173-nor", @@ -10,6 +10,7 @@ Required properties: "mediatek,mt2712-nor", "mediatek,mt8173-nor" "mediatek,mt7622-nor", "mediatek,mt8173-nor" "mediatek,mt7623-nor", "mediatek,mt8173-nor" + "mediatek,mt7629-nor", "mediatek,mt8173-nor" "mediatek,mt8173-nor" - reg: physical base address and length of the controller's register - clocks: the phandle of the clocks needed by the nor controller diff --git a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt new file mode 100644 index 000000000000..ad2bef826582 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt @@ -0,0 +1,61 @@ +STMicroelectronics Flexible Memory Controller 2 (FMC2) +NAND Interface + +Required properties: +- compatible: Should be one of: + * st,stm32mp15-fmc2 +- reg: NAND flash controller memory areas. + First region contains the register location. + Regions 2 to 4 respectively contain the data, command, + and address space for CS0. + Regions 5 to 7 contain the same areas for CS1. +- interrupts: The interrupt number +- pinctrl-0: Standard Pinctrl phandle (see: pinctrl/pinctrl-bindings.txt) +- clocks: The clock needed by the NAND flash controller + +Optional properties: +- resets: Reference to a reset controller asserting the FMC controller +- dmas: DMA specifiers (see: dma/stm32-mdma.txt) +- dma-names: Must be "tx", "rx" and "ecc" + +* NAND device bindings: + +Required properties: +- reg: describes the CS lines assigned to the NAND device. 
+ +Optional properties: +- nand-on-flash-bbt: see nand.txt +- nand-ecc-strength: see nand.txt +- nand-ecc-step-size: see nand.txt + +The following ECC strength and step size are currently supported: + - nand-ecc-strength = <1>, nand-ecc-step-size = <512> (Hamming) + - nand-ecc-strength = <4>, nand-ecc-step-size = <512> (BCH4) + - nand-ecc-strength = <8>, nand-ecc-step-size = <512> (BCH8) (default) + +Example: + + fmc: nand-controller@58002000 { + compatible = "st,stm32mp15-fmc2"; + reg = <0x58002000 0x1000>, + <0x80000000 0x1000>, + <0x88010000 0x1000>, + <0x88020000 0x1000>, + <0x81000000 0x1000>, + <0x89010000 0x1000>, + <0x89020000 0x1000>; + interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&rcc FMC_K>; + resets = <&rcc FMC_R>; + pinctrl-names = "default"; + pinctrl-0 = <&fmc_pins_a>; + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + nand-on-flash-bbt; + #address-cells = <1>; + #size-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/regulator/fan53555.txt b/Documentation/devicetree/bindings/regulator/fan53555.txt index 54a3f2c80e3a..e7fc045281d1 100644 --- a/Documentation/devicetree/bindings/regulator/fan53555.txt +++ b/Documentation/devicetree/bindings/regulator/fan53555.txt @@ -1,7 +1,8 @@ Binding for Fairchild FAN53555 regulators Required properties: - - compatible: one of "fcs,fan53555", "silergy,syr827", "silergy,syr828" + - compatible: one of "fcs,fan53555", "fcs,fan53526", "silergy,syr827" or + "silergy,syr828" - reg: I2C address Optional properties: diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt deleted file mode 100644 index 0c2a6c8a1536..000000000000 --- a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt +++ /dev/null @@ -1,35 +0,0 @@ -Fixed Voltage regulators - -Required properties: -- compatible: Must be "regulator-fixed"; -- regulator-name: Defined in regulator.txt as optional, but required here. - -Optional properties: -- gpio: gpio to use for enable control -- startup-delay-us: startup time in microseconds -- enable-active-high: Polarity of GPIO is Active high -If this property is missing, the default assumed is Active low. -- gpio-open-drain: GPIO is open drain type. - If this property is missing then default assumption is false. --vin-supply: Input supply name. - -Any property defined as part of the core regulator -binding, defined in regulator.txt, can also be used. -However a fixed voltage regulator is expected to have the -regulator-min-microvolt and regulator-max-microvolt -to be the same. 
- -Example: - - abc: fixedregulator@0 { - compatible = "regulator-fixed"; - regulator-name = "fixed-supply"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - gpio = <&gpio1 16 0>; - startup-delay-us = <70000>; - enable-active-high; - regulator-boot-on; - gpio-open-drain; - vin-supply = <&parent_reg>; - }; diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml new file mode 100644 index 000000000000..d289c2f7455a --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/regulator/fixed-regulator.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Fixed Voltage regulators + +maintainers: + - Liam Girdwood <lgirdwood@gmail.com> + - Mark Brown <broonie@kernel.org> + +description: + Any property defined as part of the core regulator binding, defined in + regulator.txt, can also be used. However a fixed voltage regulator is + expected to have the regulator-min-microvolt and regulator-max-microvolt + to be the same. + +properties: + compatible: + const: regulator-fixed + + regulator-name: true + + gpio: + description: gpio to use for enable control + maxItems: 1 + + startup-delay-us: + description: startup time in microseconds + $ref: /schemas/types.yaml#/definitions/uint32 + + enable-active-high: + description: + Polarity of GPIO is Active high. If this property is missing, + the default assumed is Active low. + type: boolean + + gpio-open-drain: + description: + GPIO is open drain type. If this property is missing then default + assumption is false. + type: boolean + + vin-supply: + description: Input supply phandle. + $ref: /schemas/types.yaml#/definitions/phandle + +required: + - compatible + - regulator-name + +examples: + - | + reg_1v8: regulator-1v8 { + compatible = "regulator-fixed"; + regulator-name = "1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + gpio = <&gpio1 16 0>; + startup-delay-us = <70000>; + enable-active-high; + regulator-boot-on; + gpio-open-drain; + vin-supply = <&parent_reg>; + }; +... diff --git a/Documentation/devicetree/bindings/regulator/max77650-regulator.txt b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt new file mode 100644 index 000000000000..f1cbe813c30f --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt @@ -0,0 +1,41 @@ +Regulator driver for MAX77650 PMIC from Maxim Integrated. + +This module is part of the MAX77650 MFD device. For more details +see Documentation/devicetree/bindings/mfd/max77650.txt. + +The regulator controller is represented as a sub-node of the PMIC node +on the device tree. + +The device has a single LDO regulator and a SIMO buck-boost regulator with +three independent power rails. + +Required properties: +-------------------- +- compatible: Must be "maxim,max77650-regulator" + +Each rail must be instantiated under the regulators subnode of the top PMIC +node. Up to four regulators can be defined. For standard regulator properties +refer to Documentation/devicetree/bindings/regulator/regulator.txt. + +Available regulator compatible strings are: "ldo", "sbb0", "sbb1", "sbb2". 
+ +Example: +-------- + + regulators { + compatible = "maxim,max77650-regulator"; + + max77650_ldo: regulator@0 { + regulator-compatible = "ldo"; + regulator-name = "max77650-ldo"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <2937500>; + }; + + max77650_sbb0: regulator@1 { + regulator-compatible = "sbb0"; + regulator-name = "max77650-sbb0"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1587500>; + }; + }; diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt index f9be1acf891c..4d3b12b92cb3 100644 --- a/Documentation/devicetree/bindings/regulator/pfuze100.txt +++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt @@ -8,7 +8,7 @@ Optional properties: - fsl,pfuze-support-disable-sw: Boolean, if present disable all unused switch regulators to save power consumption. Attention, ensure that all important regulators (e.g. DDR ref, DDR supply) has set the "regulator-always-on" - property. If not present, the switched regualtors are always on and can't be + property. If not present, the switched regulators are always on and can't be disabled. This binding is a workaround to keep backward compatibility with old dtb's which rely on the fact that the switched regulators are always on and don't mark them explicit as "regulator-always-on". diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt new file mode 100644 index 000000000000..698cfc3bc3dd --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt @@ -0,0 +1,68 @@ +ROHM BD70528 Power Management Integrated Circuit regulator bindings + +Required properties: + - regulator-name: should be "buck1", "buck2", "buck3", "ldo1", "ldo2", "ldo3", + "led_ldo1", "led_ldo2" + +List of regulators provided by this controller. BD70528 regulators node +should be sub node of the BD70528 MFD node. 
See BD70528 MFD bindings at +Documentation/devicetree/bindings/mfd/rohm,bd70528-pmic.txt + +The valid names for BD70528 regulator nodes are: +BUCK1, BUCK2, BUCK3, LDO1, LDO2, LDO3, LED_LDO1, LED_LDO2 + +Optional properties: +- Any optional property defined in bindings/regulator/regulator.txt + +Example: +regulators { + buck1: BUCK1 { + regulator-name = "buck1"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3400000>; + regulator-boot-on; + regulator-ramp-delay = <125>; + }; + buck2: BUCK2 { + regulator-name = "buck2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-ramp-delay = <125>; + }; + buck3: BUCK3 { + regulator-name = "buck3"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-ramp-delay = <250>; + }; + ldo1: LDO1 { + regulator-name = "ldo1"; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; + ldo2: LDO2 { + regulator-name = "ldo2"; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; + + ldo3: LDO3 { + regulator-name = "ldo3"; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3300000>; + }; + led_ldo1: LED_LDO1 { + regulator-name = "led_ldo1"; + regulator-min-microvolt = <200000>; + regulator-max-microvolt = <300000>; + }; + led_ldo2: LED_LDO2 { + regulator-name = "led_ldo2"; + regulator-min-microvolt = <200000>; + regulator-max-microvolt = <300000>; + }; +}; diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt index 4b98ca26e61a..cbce62c22b60 100644 --- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt @@ -27,8 +27,38 @@ BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6 LDO1, LDO2, LDO3, LDO4, LDO5, LDO6 Optional properties: +- rohm,dvs-run-voltage : PMIC default "RUN" state voltage in uV. + See below table for bucks which support this. +- rohm,dvs-idle-voltage : PMIC default "IDLE" state voltage in uV. + See below table for bucks which support this. +- rohm,dvs-suspend-voltage : PMIC default "SUSPEND" state voltage in uV. + See below table for bucks which support this. 
- Any optional property defined in bindings/regulator/regulator.txt +Supported default DVS states: + +BD71837: +buck | dvs-run-voltage | dvs-idle-voltage | dvs-suspend-voltage +----------------------------------------------------------------------------- +1 | supported | supported | supported +---------------------------------------------------------------------------- +2 | supported | supported | not supported +---------------------------------------------------------------------------- +3 | supported | not supported | not supported +---------------------------------------------------------------------------- +4 | supported | not supported | not supported +---------------------------------------------------------------------------- +rest | not supported | not supported | not supported + +BD71847: +buck | dvs-run-voltage | dvs-idle-voltage | dvs-suspend-voltage +----------------------------------------------------------------------------- +1 | supported | supported | supported +---------------------------------------------------------------------------- +2 | supported | supported | not supported +---------------------------------------------------------------------------- +rest | not supported | not supported | not supported + Example: regulators { buck1: BUCK1 { @@ -36,7 +66,11 @@ regulators { regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + regulator-always-on; regulator-ramp-delay = <1250>; + rohm,dvs-run-voltage = <900000>; + rohm,dvs-idle-voltage = <850000>; + rohm,dvs-suspend-voltage = <800000>; }; buck2: BUCK2 { regulator-name = "buck2"; @@ -45,18 +79,22 @@ regulators { regulator-boot-on; regulator-always-on; regulator-ramp-delay = <1250>; + rohm,dvs-run-voltage = <1000000>; + rohm,dvs-idle-voltage = <900000>; }; buck3: BUCK3 { regulator-name = "buck3"; regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + rohm,dvs-run-voltage = <1000000>; }; buck4: BUCK4 { regulator-name = "buck4"; regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + rohm,dvs-run-voltage = <1000000>; }; buck5: BUCK5 { regulator-name = "buck5"; diff --git a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt index a3f476240565..6189df71ea98 100644 --- a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt @@ -23,16 +23,14 @@ Switches are fixed voltage regulators with only enable/disable capability. Optional properties: - st,mask-reset: mask reset for this regulator: the regulator configuration is maintained during pmic reset. -- regulator-pull-down: enable high pull down - if not specified light pull down is used - regulator-over-current-protection: if set, all regulators are switched off in case of over-current detection on this regulator, if not set, the driver only sends an over-current event. -- interrupt-parent: phandle to the parent interrupt controller - interrupts: index of current limit detection interrupt - <regulator>-supply: phandle to the parent supply/regulator node each regulator supply can be described except vref_ddr. +- regulator-active-discharge: can be used on pwr_sw1 and pwr_sw2. 
Example: regulators { @@ -43,7 +41,6 @@ regulators { vdd_core: buck1 { regulator-name = "vdd_core"; interrupts = <IT_CURLIM_BUCK1 0>; - interrupt-parent = <&pmic>; st,mask-reset; regulator-pull-down; regulator-min-microvolt = <700000>; @@ -53,7 +50,6 @@ regulators { v3v3: buck4 { regulator-name = "v3v3"; interrupts = <IT_CURLIM_BUCK4 0>; - interrupt-parent = <&mypmic>; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; diff --git a/Documentation/devicetree/bindings/regulator/tps65218.txt b/Documentation/devicetree/bindings/regulator/tps65218.txt index 02f0e9bbfbf8..54aded3b78e2 100644 --- a/Documentation/devicetree/bindings/regulator/tps65218.txt +++ b/Documentation/devicetree/bindings/regulator/tps65218.txt @@ -71,8 +71,13 @@ tps65218: tps65218@24 { regulator-always-on; }; + ls2: regulator-ls2 { + regulator-min-microamp = <100000>; + regulator-max-microamp = <1000000>; + }; + ls3: regulator-ls3 { - regulator-min-microvolt = <100000>; - regulator-max-microvolt = <1000000>; + regulator-min-microamp = <100000>; + regulator-max-microamp = <1000000>; }; }; diff --git a/Documentation/devicetree/bindings/spi/atmel-quadspi.txt b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt index b93c1e2f25dd..7c40ea694352 100644 --- a/Documentation/devicetree/bindings/spi/atmel-quadspi.txt +++ b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt @@ -1,14 +1,19 @@ * Atmel Quad Serial Peripheral Interface (QSPI) Required properties: -- compatible: Should be "atmel,sama5d2-qspi". +- compatible: Should be one of the following: + - "atmel,sama5d2-qspi" + - "microchip,sam9x60-qspi" - reg: Should contain the locations and lengths of the base registers and the mapped memory. - reg-names: Should contain the resource reg names: - qspi_base: configuration register address space - qspi_mmap: memory mapped address space - interrupts: Should contain the interrupt for the device. -- clocks: The phandle of the clock needed by the QSPI controller. +- clocks: Should reference the peripheral clock and the QSPI system + clock if available. +- clock-names: Should contain "pclk" for the peripheral clock and "qspick" + for the system clock when available. - #address-cells: Should be <1>. - #size-cells: Should be <0>. @@ -19,7 +24,8 @@ spi@f0020000 { reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>; reg-names = "qspi_base", "qspi_mmap"; interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&spi0_clk>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 52>; + clock-names = "pclk"; #address-cells = <1>; #size-cells = <0>; pinctrl-names = "default"; diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt index e3c48b20b1a6..2d3264140cc5 100644 --- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt +++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt @@ -10,6 +10,7 @@ Required properties: - "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35 - "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51 - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc + - "fsl,imx8mq-ecspi" for SPI compatible with the one integrated on i.MX8M - reg : Offset and length of the register set for the device - interrupts : Should contain CSPI/eCSPI interrupt - clocks : Clock specifiers for both ipg and per clocks. 
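For illustration, a minimal sketch of an SPI controller node using the newly added "fsl,imx8mq-ecspi" compatible, following the required properties listed above; the unit address, interrupt number and clock phandles are placeholders for this sketch and are not taken from the binding itself (check the SoC dtsi and reference manual for the real values):

	ecspi1: spi@30820000 {
		compatible = "fsl,imx8mq-ecspi";
		reg = <0x30820000 0x10000>;
		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&clk IMX8MQ_CLK_ECSPI1_ROOT>,
			 <&clk IMX8MQ_CLK_ECSPI1_ROOT>;
		clock-names = "ipg", "per";
		#address-cells = <1>;
		#size-cells = <0>;
	};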
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt index 483e9cfac1b1..e8f1d627d288 100644 --- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt +++ b/Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt @@ -14,15 +14,13 @@ Required properties: - clocks : The clocks needed by the QuadSPI controller - clock-names : Should contain the name of the clocks: "qspi_en" and "qspi". -Optional properties: - - fsl,qspi-has-second-chip: The controller has two buses, bus A and bus B. - Each bus can be connected with two NOR flashes. - Most of the time, each bus only has one NOR flash - connected, this is the default case. - But if there are two NOR flashes connected to the - bus, you should enable this property. - (Please check the board's schematic.) - - big-endian : That means the IP register is big endian +Required SPI slave node properties: + - reg: There are two buses (A and B) with two chip selects each. + This encodes to which bus and CS the flash is connected: + <0>: Bus A, CS 0 + <1>: Bus A, CS 1 + <2>: Bus B, CS 0 + <3>: Bus B, CS 1 Example: @@ -40,7 +38,7 @@ qspi0: quadspi@40044000 { }; }; -Example showing the usage of two SPI NOR devices: +Example showing the usage of two SPI NOR devices on bus A: &qspi2 { pinctrl-names = "default"; diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt new file mode 100644 index 000000000000..2cd67eb727d4 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt @@ -0,0 +1,39 @@ +* NXP Flex Serial Peripheral Interface (FSPI) + +Required properties: + - compatible : Should be "nxp,lx2160a-fspi" + - reg : First contains the register location and length, + Second contains the memory mapping address and length + - reg-names : Should contain the resource reg names: + - fspi_base: configuration register address space + - fspi_mmap: memory mapped address space + - interrupts : Should contain the interrupt for the device + +Required SPI slave node properties: + - reg : There are two buses (A and B) with two chip selects each. + This encodes to which bus and CS the flash is connected: + - <0>: Bus A, CS 0 + - <1>: Bus A, CS 1 + - <2>: Bus B, CS 0 + - <3>: Bus B, CS 1 + +Example showing the usage of two SPI NOR slave devices on bus A: + +fspi0: spi@20c0000 { + compatible = "nxp,lx2160a-fspi"; + reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>; + reg-names = "fspi_base", "fspi_mmap"; + interrupts = <0 25 0x4>; /* Level high type */ + clocks = <&clockgen 4 3>, <&clockgen 4 3>; + clock-names = "fspi_en", "fspi"; + + mt35xu512aba0: flash@0 { + reg = <0>; + .... + }; + + mt35xu512aba1: flash@1 { + reg = <1>; + .... + }; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-sifive.txt b/Documentation/devicetree/bindings/spi/spi-sifive.txt new file mode 100644 index 000000000000..3f5c6e438972 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-sifive.txt @@ -0,0 +1,37 @@ +SiFive SPI controller Device Tree Bindings +------------------------------------------ + +Required properties: +- compatible : Should be "sifive,<chip>-spi" and "sifive,spi<version>". + Supported compatible strings are: + "sifive,fu540-c000-spi" for the SiFive SPI v0 as integrated + onto the SiFive FU540 chip, and "sifive,spi0" for the SiFive + SPI v0 IP block with no chip integration tweaks. 
+ Please refer to sifive-blocks-ip-versioning.txt for details +- reg : Physical base address and size of SPI registers map + A second (optional) range can indicate memory mapped flash +- interrupts : Must contain one entry +- interrupt-parent : Must be core interrupt controller +- clocks : Must reference the frequency given to the controller +- #address-cells : Must be '1', indicating which CS to use +- #size-cells : Must be '0' + +Optional properties: +- sifive,fifo-depth : Depth of hardware queues; defaults to 8 +- sifive,max-bits-per-word : Maximum bits per word; defaults to 8 + +SPI RTL that corresponds to the IP block version numbers can be found here: +https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi + +Example: + spi: spi@10040000 { + compatible = "sifive,fu540-c000-spi", "sifive,spi0"; + reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>; + interrupt-parent = <&plic>; + interrupts = <51>; + clocks = <&tlclk>; + #address-cells = <1>; + #size-cells = <0>; + sifive,fifo-depth = <8>; + sifive,max-bits-per-word = <8>; + }; diff --git a/Documentation/devicetree/bindings/spi/spi-sprd.txt b/Documentation/devicetree/bindings/spi/spi-sprd.txt index bad211a19da4..3c7eacce0ee3 100644 --- a/Documentation/devicetree/bindings/spi/spi-sprd.txt +++ b/Documentation/devicetree/bindings/spi/spi-sprd.txt @@ -14,6 +14,11 @@ Required properties: address on the SPI bus. Should be set to 1. - #size-cells: Should be set to 0. +Optional properties: +dma-names: Should contain names of the SPI used DMA channel. +dmas: Should contain DMA channels and DMA slave ids which the SPI used + sorted in the same order as the dma-names property. + Example: spi0: spi@70a00000{ compatible = "sprd,sc9860-spi"; @@ -21,6 +26,8 @@ spi0: spi@70a00000{ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; clock-names = "spi", "source","enable"; clocks = <&clk_spi0>, <&ext_26m>, <&clk_ap_apb_gates 5>; + dma-names = "rx_chn", "tx_chn"; + dmas = <&apdma 11 11>, <&apdma 12 12>; #address-cells = <1>; #size-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/spi/spi-stm32.txt b/Documentation/devicetree/bindings/spi/spi-stm32.txt index 1b3fa2c119d5..d82755c63eaf 100644 --- a/Documentation/devicetree/bindings/spi/spi-stm32.txt +++ b/Documentation/devicetree/bindings/spi/spi-stm32.txt @@ -7,7 +7,9 @@ from 4 to 32-bit data size. Although it can be configured as master or slave, only master is supported by the driver. Required properties: -- compatible: Must be "st,stm32h7-spi". +- compatible: Should be one of: + "st,stm32h7-spi" + "st,stm32f4-spi" - reg: Offset and length of the device's register set. - interrupts: Must contain the interrupt id. - clocks: Must contain an entry for spiclk (which feeds the internal clock @@ -30,8 +32,9 @@ Child nodes represent devices on the SPI bus See ../spi/spi-bus.txt Optional properties: -- st,spi-midi-ns: (Master Inter-Data Idleness) minimum time delay in - nanoseconds inserted between two consecutive data frames. +- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time + delay in nanoseconds inserted between two consecutive data + frames. Example: diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 25170ad7d25b..101f2b2c69ad 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -533,16 +533,12 @@ Bridge VLAN filtering function that the driver has to call for each VLAN the given port is a member of. 
A switchdev object is used to carry the VID and bridge flags. -- port_fdb_prepare: bridge layer function invoked when the bridge prepares the - installation of a Forwarding Database entry. If the operation is not - supported, this function should return -EOPNOTSUPP to inform the bridge code - to fallback to a software implementation. No hardware setup must be done in - this function. See port_fdb_add for this and details. - - port_fdb_add: bridge layer function invoked when the bridge wants to install a Forwarding Database entry, the switch hardware should be programmed with the specified address in the specified VLAN Id in the forwarding database - associated with this VLAN ID + associated with this VLAN ID. If the operation is not supported, this + function should return -EOPNOTSUPP to inform the bridge code to fallback to + a software implementation. Note: VLAN ID 0 corresponds to the port private database, which, in the context of DSA, would be the its port-based VLAN, used by the associated bridge device. diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst index fe46d4867e2d..18c1415e7bfa 100644 --- a/Documentation/networking/msg_zerocopy.rst +++ b/Documentation/networking/msg_zerocopy.rst @@ -7,7 +7,7 @@ Intro ===== The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. -The feature is currently implemented for TCP sockets. +The feature is currently implemented for TCP and UDP sockets. Opportunity and Caveats diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 82236a17b5e6..97b7ca8b9b86 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -92,11 +92,11 @@ device. Switch ID ^^^^^^^^^ -The switchdev driver must implement the switchdev op switchdev_port_attr_get -for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same -physical ID for each port of a switch. The ID must be unique between switches -on the same system. The ID does not need to be unique between switches on -different systems. +The switchdev driver must implement the net_device operation +ndo_get_port_parent_id for each port netdev, returning the same physical ID for +each port of a switch. The ID must be unique between switches on the same +system. The ID does not need to be unique between switches on different +systems. The switch ID is used to locate ports on a switch and to know if aggregated ports belong to the same switch. diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst index dc2ddc345044..fbb9297e6360 100644 --- a/Documentation/process/applying-patches.rst +++ b/Documentation/process/applying-patches.rst @@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to generate a patch representing the differences between two patches and then apply the result. -This will let you move from something like 4.7.2 to 4.7.3 in a single +This will let you move from something like 5.7.2 to 5.7.3 in a single step. The -z flag to interdiff will even let you feed it patches in gzip or bzip2 compressed form directly without the use of zcat or bzcat or manual decompression. 
-Here's how you'd go from 4.7.2 to 4.7.3 in a single step:: +Here's how you'd go from 5.7.2 to 5.7.3 in a single step:: - interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1 + interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1 Although interdiff may save you a step or two you are generally advised to do the additional steps since interdiff can get things wrong in some cases. @@ -245,62 +245,67 @@ The patches are available at http://kernel.org/ Most recent patches are linked from the front page, but they also have specific homes. -The 4.x.y (-stable) and 4.x patches live at +The 5.x.y (-stable) and 5.x patches live at - https://www.kernel.org/pub/linux/kernel/v4.x/ + https://www.kernel.org/pub/linux/kernel/v5.x/ -The -rc patches live at +The -rc patches are not stored on the webserver but are generated on +demand from git tags such as - https://www.kernel.org/pub/linux/kernel/v4.x/testing/ + https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0 +The stable -rc patches live at -The 4.x kernels + https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/ + + +The 5.x kernels =============== These are the base stable releases released by Linus. The highest numbered release is the most recent. If regressions or other serious flaws are found, then a -stable fix patch -will be released (see below) on top of this base. Once a new 4.x base +will be released (see below) on top of this base. Once a new 5.x base kernel is released, a patch is made available that is a delta between the -previous 4.x kernel and the new one. +previous 5.x kernel and the new one. -To apply a patch moving from 4.6 to 4.7, you'd do the following (note -that such patches do **NOT** apply on top of 4.x.y kernels but on top of the -base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to -first revert the 4.x.y patch). +To apply a patch moving from 5.6 to 5.7, you'd do the following (note +that such patches do **NOT** apply on top of 5.x.y kernels but on top of the +base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to +first revert the 5.x.y patch). Here are some examples:: - # moving from 4.6 to 4.7 + # moving from 5.6 to 5.7 - $ cd ~/linux-4.6 # change to kernel source dir - $ patch -p1 < ../patch-4.7 # apply the 4.7 patch + $ cd ~/linux-5.6 # change to kernel source dir + $ patch -p1 < ../patch-5.7 # apply the 5.7 patch $ cd .. - $ mv linux-4.6 linux-4.7 # rename source dir + $ mv linux-5.6 linux-5.7 # rename source dir - # moving from 4.6.1 to 4.7 + # moving from 5.6.1 to 5.7 - $ cd ~/linux-4.6.1 # change to kernel source dir - $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch - # source dir is now 4.6 - $ patch -p1 < ../patch-4.7 # apply new 4.7 patch + $ cd ~/linux-5.6.1 # change to kernel source dir + $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch + # source dir is now 5.6 + $ patch -p1 < ../patch-5.7 # apply new 5.7 patch $ cd .. - $ mv linux-4.6.1 linux-4.7 # rename source dir + $ mv linux-5.6.1 linux-5.7 # rename source dir -The 4.x.y kernels +The 5.x.y kernels ================= Kernels with 3-digit versions are -stable kernels. They contain small(ish) critical fixes for security problems or significant regressions discovered -in a given 4.x kernel. +in a given 5.x kernel. This is the recommended branch for users who want the most recent stable kernel and are not interested in helping test development/experimental versions. 
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is +If no 5.x.y kernel is available, then the highest numbered 5.x kernel is the current stable kernel. .. note:: @@ -308,23 +313,23 @@ the current stable kernel. The -stable team usually do make incremental patches available as well as patches against the latest mainline release, but I only cover the non-incremental ones below. The incremental ones can be found at - https://www.kernel.org/pub/linux/kernel/v4.x/incr/ + https://www.kernel.org/pub/linux/kernel/v5.x/incr/ -These patches are not incremental, meaning that for example the 4.7.3 -patch does not apply on top of the 4.7.2 kernel source, but rather on top -of the base 4.7 kernel source. +These patches are not incremental, meaning that for example the 5.7.3 +patch does not apply on top of the 5.7.2 kernel source, but rather on top +of the base 5.7 kernel source. -So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel -source you have to first back out the 4.7.2 patch (so you are left with a -base 4.7 kernel source) and then apply the new 4.7.3 patch. +So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel +source you have to first back out the 5.7.2 patch (so you are left with a +base 5.7 kernel source) and then apply the new 5.7.3 patch. Here's a small example:: - $ cd ~/linux-4.7.2 # change to the kernel source dir - $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch - $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch + $ cd ~/linux-5.7.2 # change to the kernel source dir + $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch + $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch $ cd .. - $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir + $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir The -rc kernels =============== @@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing development kernels but do not want to run some of the really experimental stuff (such people should see the sections about -next and -mm kernels below). -The -rc patches are not incremental, they apply to a base 4.x kernel, just -like the 4.x.y patches described above. The kernel version before the -rcN +The -rc patches are not incremental, they apply to a base 5.x kernel, just +like the 5.x.y patches described above. The kernel version before the -rcN suffix denotes the version of the kernel that this -rc kernel will eventually turn into. -So, 4.8-rc5 means that this is the fifth release candidate for the 4.8 -kernel and the patch should be applied on top of the 4.7 kernel source. +So, 5.8-rc5 means that this is the fifth release candidate for the 5.8 +kernel and the patch should be applied on top of the 5.7 kernel source. Here are 3 examples of how to apply these patches:: - # first an example of moving from 4.7 to 4.8-rc3 + # first an example of moving from 5.7 to 5.8-rc3 - $ cd ~/linux-4.7 # change to the 4.7 source dir - $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch + $ cd ~/linux-5.7 # change to the 5.7 source dir + $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch $ cd .. 
- $ mv linux-4.7 linux-4.8-rc3 # rename the source dir + $ mv linux-5.7 linux-5.8-rc3 # rename the source dir - # now let's move from 4.8-rc3 to 4.8-rc5 + # now let's move from 5.8-rc3 to 5.8-rc5 - $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir - $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch - $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch + $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir + $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch + $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch $ cd .. - $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir + $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir - # finally let's try and move from 4.7.3 to 4.8-rc5 + # finally let's try and move from 5.7.3 to 5.8-rc5 - $ cd ~/linux-4.7.3 # change to the kernel source dir - $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch - $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch + $ cd ~/linux-5.7.3 # change to the kernel source dir + $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch + $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch $ cd .. - $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir + $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir The -mm patches and the linux-next tree diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx index 13a0b7fb192f..551325b66b23 100644 --- a/Documentation/spi/pxa2xx +++ b/Documentation/spi/pxa2xx @@ -21,15 +21,15 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a "platform device". The master configuration is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h: -struct pxa2xx_spi_master { +struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; }; -The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of +The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of slave device (chips) attached to this SPI master. -The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should +The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should be used. This caused the driver to acquire two DMA channels: rx_channel and tx_channel. The rx_channel has a higher DMA service priority the tx_channel. See the "PXA2xx Developer Manual" section "DMA Controller". @@ -51,7 +51,7 @@ static struct resource pxa_spi_nssp_resources[] = { }, }; -static struct pxa2xx_spi_master pxa_nssp_master_info = { +static struct pxa2xx_spi_controller pxa_nssp_master_info = { .num_chipselect = 1, /* Matches the number of chips attached to NSSP */ .enable_dma = 1, /* Enables NSSP DMA */ }; @@ -206,7 +206,7 @@ DMA and PIO I/O Support ----------------------- The pxa2xx_spi driver supports both DMA and interrupt driven PIO message transfers. The driver defaults to PIO mode and DMA transfers must be enabled -by setting the "enable_dma" flag in the "pxa2xx_spi_master" structure. The DMA +by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA mode supports both coherent and stream based DMA mappings. The following logic is used to determine the type of I/O to be used on diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst index 80f5ffc94a9e..b37166817842 100644 --- a/Documentation/translations/it_IT/admin-guide/README.rst +++ b/Documentation/translations/it_IT/admin-guide/README.rst @@ -4,7 +4,7 @@ .. 
_it_readme: -Rilascio del kernel Linux 4.x <http://kernel.org/> +Rilascio del kernel Linux 5.x <http://kernel.org/> =================================================== .. warning:: diff --git a/MAINTAINERS b/MAINTAINERS index 41ce5f4ad838..7f06c136f54c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -409,8 +409,7 @@ F: drivers/platform/x86/wmi.c F: include/uapi/linux/wmi.h AD1889 ALSA SOUND DRIVER -M: Thibaut Varene <T-Bone@parisc-linux.org> -W: http://wiki.parisc-linux.org/AD1889 +W: https://parisc.wiki.kernel.org/index.php/AD1889 L: linux-parisc@vger.kernel.org S: Maintained F: sound/pci/ad1889.* @@ -2852,7 +2851,7 @@ R: Martin KaFai Lau <kafai@fb.com> R: Song Liu <songliubraving@fb.com> R: Yonghong Song <yhs@fb.com> L: netdev@vger.kernel.org -L: linux-kernel@vger.kernel.org +L: bpf@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 @@ -2882,6 +2881,7 @@ N: bpf BPF JIT for ARM M: Shubham Bansal <illusionist.neo@gmail.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/arm/net/ @@ -2890,18 +2890,21 @@ M: Daniel Borkmann <daniel@iogearbox.net> M: Alexei Starovoitov <ast@kernel.org> M: Zi Shen Lim <zlim.lnx@gmail.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: arch/arm64/net/ BPF JIT for MIPS (32-BIT AND 64-BIT) M: Paul Burton <paul.burton@mips.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/mips/net/ BPF JIT for NFP NICs M: Jakub Kicinski <jakub.kicinski@netronome.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: drivers/net/ethernet/netronome/nfp/bpf/ @@ -2909,6 +2912,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT) M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> M: Sandipan Das <sandipan@linux.ibm.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/powerpc/net/ @@ -2916,6 +2920,7 @@ BPF JIT for S390 M: Martin Schwidefsky <schwidefsky@de.ibm.com> M: Heiko Carstens <heiko.carstens@de.ibm.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/s390/net/ X: arch/s390/net/pnet.c @@ -2923,12 +2928,14 @@ X: arch/s390/net/pnet.c BPF JIT for SPARC (32-BIT AND 64-BIT) M: David S. 
Miller <davem@davemloft.net> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/sparc/net/ BPF JIT for X86 32-BIT M: Wang YanQing <udknight@gmail.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/x86/net/bpf_jit_comp32.c @@ -2936,6 +2943,7 @@ BPF JIT for X86 64-BIT M: Alexei Starovoitov <ast@kernel.org> M: Daniel Borkmann <daniel@iogearbox.net> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: arch/x86/net/ X: arch/x86/net/bpf_jit_comp32.c @@ -3390,9 +3398,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic* F: drivers/media/platform/marvell-ccic/ CAIF NETWORK LAYER -M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> L: netdev@vger.kernel.org -S: Supported +S: Orphan F: Documentation/networking/caif/ F: drivers/net/caif/ F: include/uapi/linux/caif/ @@ -6098,9 +6105,9 @@ F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt FREESCALE QUAD SPI DRIVER M: Han Xu <han.xu@nxp.com> -L: linux-mtd@lists.infradead.org +L: linux-spi@vger.kernel.org S: Maintained -F: drivers/mtd/spi-nor/fsl-quadspi.c +F: drivers/spi/spi-fsl-qspi.c FREESCALE QUICC ENGINE LIBRARY M: Qiang Zhao <qiang.zhao@nxp.com> @@ -7169,6 +7176,7 @@ F: drivers/i2c/i2c-stub.c I3C SUBSYSTEM M: Boris Brezillon <bbrezillon@kernel.org> L: linux-i3c@lists.infradead.org +C: irc://chat.freenode.net/linux-i3c T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git S: Maintained F: Documentation/ABI/testing/sysfs-bus-i3c @@ -8487,6 +8495,7 @@ L7 BPF FRAMEWORK M: John Fastabend <john.fastabend@gmail.com> M: Daniel Borkmann <daniel@iogearbox.net> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: include/linux/skmsg.h F: net/core/skmsg.c @@ -9858,6 +9867,13 @@ F: drivers/media/platform/meson/ao-cec.c F: Documentation/devicetree/bindings/media/meson-ao-cec.txt T: git git://linuxtv.org/media_tree.git +MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS +M: Liang Yang <liang.yang@amlogic.com> +L: linux-mtd@lists.infradead.org +S: Maintained +F: drivers/mtd/nand/raw/meson_* +F: Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt + MICROBLAZE ARCHITECTURE M: Michal Simek <monstr@monstr.eu> W: http://www.monstr.eu/fdt/ @@ -10936,6 +10952,14 @@ F: lib/objagg.c F: lib/test_objagg.c F: include/linux/objagg.h +NXP FSPI DRIVER +R: Yogesh Gaur <yogeshgaur.83@gmail.com> +M: Ashish Kumar <ashish.kumar@nxp.com> +L: linux-spi@vger.kernel.org +S: Maintained +F: drivers/spi/spi-nxp-fspi.c +F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt + OBJTOOL M: Josh Poimboeuf <jpoimboe@redhat.com> M: Peter Zijlstra <peterz@infradead.org> @@ -11488,7 +11512,7 @@ F: Documentation/blockdev/paride.txt F: drivers/block/paride/ PARISC ARCHITECTURE -M: "James E.J. Bottomley" <jejb@parisc-linux.org> +M: "James E.J. 
Bottomley" <James.Bottomley@HansenPartnership.com> M: Helge Deller <deller@gmx.de> L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ @@ -13600,11 +13624,18 @@ F: drivers/mmc/host/sdhci-brcmstb* SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER M: Adrian Hunter <adrian.hunter@intel.com> L: linux-mmc@vger.kernel.org -T: git git://git.infradead.org/users/ahunter/linux-sdhci.git S: Maintained F: drivers/mmc/host/sdhci* F: include/linux/mmc/sdhci* +EMMC CMDQ HOST CONTROLLER INTERFACE (CQHCI) DRIVER +M: Adrian Hunter <adrian.hunter@intel.com> +M: Ritesh Harjani <riteshh@codeaurora.org> +M: Asutosh Das <asutoshd@codeaurora.org> +L: linux-mmc@vger.kernel.org +S: Maintained +F: drivers/mmc/host/cqhci* + SYNOPSYS SDHCI COMPLIANT DWC MSHC DRIVER M: Prabu Thangamuthu <prabu.t@synopsys.com> M: Manjunath M B <manjumb@synopsys.com> @@ -14331,6 +14362,7 @@ F: arch/arm/mach-spear/ SPI NOR SUBSYSTEM M: Marek Vasut <marek.vasut@gmail.com> +M: Tudor Ambarus <tudor.ambarus@microchip.com> L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ @@ -16714,6 +16746,7 @@ M: Jesper Dangaard Brouer <hawk@kernel.org> M: John Fastabend <john.fastabend@gmail.com> L: netdev@vger.kernel.org L: xdp-newbies@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: net/core/xdp.c F: include/net/xdp.h @@ -16727,6 +16760,7 @@ XDP SOCKETS (AF_XDP) M: Björn Töpel <bjorn.topel@intel.com> M: Magnus Karlsson <magnus.karlsson@intel.com> L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: kernel/bpf/xskmap.c F: net/xdp/ @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Shy Crocodile # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index cf4ac791a592..1fe2b56cb861 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -18,7 +18,6 @@ #define USER_DS ((mm_segment_t) { -0x40000000000UL }) #define get_fs() (current_thread_info()->addr_limit) -#define get_ds() (KERNEL_DS) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 376366a7db81..d750b302d5ab 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -191,7 +191,6 @@ config NR_CPUS config ARC_SMP_HALT_ON_RESET bool "Enable Halt-on-reset boot mode" - default y if ARC_UBOOT_SUPPORT help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non @@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS (also referred to as r58:r59). These can also be used by gcc as GPR so kernel needs to save/restore per process +config ARC_IRQ_NO_AUTOSAVE + bool "Disable hardware autosave regfile on interrupts" + default n + help + On HS cores, taken interrupt auto saves the regfile on stack. + This is programmable and can be optionally disabled in which case + software INTERRUPT_PROLOGUE/EPILGUE do the needed work + endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" @@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA endif -config ARC_UBOOT_SUPPORT - bool "Support uboot arg Handling" - help - ARC Linux by default checks for uboot provided args as pointers to - external cmdline or DTB. This however breaks in absence of uboot, - when booting from Metaware debugger directly, as the registers are - not zeroed out on reset by mdb and/or ARCv2 based cores. 
The bogus - registers look like uboot args to kernel which then chokes. - So only enable the uboot arg checking/processing if users are sure - of uboot being in play. - config ARC_BUILTIN_DTB_NAME string "Built in DTB" help diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 6e84060e7c90..621f59407d76 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5 # CONFIG_ARC_HAS_LLSC is not set CONFIG_ARC_KVADDR_SIZE=402 CONFIG_ARC_EMUL_UNALIGNED=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_PREEMPT=y CONFIG_NET=y CONFIG_UNIX=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 1e59a2e9c602..e447ace6fa1c 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" CONFIG_PREEMPT=y CONFIG_NET=y diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index b5c3f6c54b03..c82cdb10aaf4 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -15,8 +15,6 @@ CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y # CONFIG_ARC_TIMERS_64BIT is not set -# CONFIG_ARC_SMP_HALT_ON_RESET is not set -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y CONFIG_NET=y diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index f1b86cef0905..a27eafdc8260 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -151,6 +151,14 @@ struct bcr_isa_arcv2 { #endif }; +struct bcr_uarch_build_arcv2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:8, prod:8, maj:8, min:8; +#else + unsigned int min:8, maj:8, prod:8, pad:8; +#endif +}; + struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index f393b663413e..2ad77fb43639 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -52,6 +52,17 @@ #define cache_line_size() SMP_CACHE_BYTES #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES +/* + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit + * alignment for any atomic64_t embedded in buffer. + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed + * value of 4 (and not 8) in ARC ABI. 
+ */ +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) +#define ARCH_SLAB_MINALIGN 8 +#endif + extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 309f4e6721b3..225e7df2d8ed 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -17,6 +17,33 @@ ; ; Now manually save: r12, sp, fp, gp, r25 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + st.as r9, [sp, -10] ; save r9 in it's final stack slot + sub sp, sp, 12 ; skip JLI, LDI, EI + + PUSH lp_count + PUSHAX lp_start + PUSHAX lp_end + PUSH blink + + PUSH r11 + PUSH r10 + + sub sp, sp, 4 ; skip r9 + + PUSH r8 + PUSH r7 + PUSH r6 + PUSH r5 + PUSH r4 + PUSH r3 + PUSH r2 + PUSH r1 + PUSH r0 +.endif +#endif + #ifdef CONFIG_ARC_HAS_ACCL_REGS PUSH r59 PUSH r58 @@ -86,6 +113,33 @@ POP r59 #endif +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + POP r0 + POP r1 + POP r2 + POP r3 + POP r4 + POP r5 + POP r6 + POP r7 + POP r8 + POP r9 + POP r10 + POP r11 + + POP blink + POPAX lp_end + POPAX lp_start + + POP r9 + mov lp_count, r9 + + add sp, sp, 12 ; skip JLI, LDI, EI + ld.as r9, [sp, -10] ; reload r9 which got clobbered +.endif +#endif + .endm /*------------------------------------------------------------------------*/ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c9173c02081c..eabc3efa6c6d 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) " .previous \n" : "+r"(d_char), "+r"(res) : "i"(0) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } @@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) : "g"(-EFAULT), "r"(count) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cc558a25b8fa..562089d62d9d 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -209,7 +209,9 @@ restore_regs: ;####### Return from Intr ####### debug_marker_l1: - bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + btst r0, STATUS_DE_BIT ; Z flag set if bit clear + bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set .Lisr_ret_fast_path: ; Handle special case #1: (Entry via Exception, Return via IRQ) diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 8b90d25a15cc..30e090625916 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -17,6 +17,7 @@ #include <asm/entry.h> #include <asm/arcregs.h> #include <asm/cache.h> +#include <asm/irqflags.h> .macro CPU_EARLY_SETUP @@ -47,6 +48,15 @@ sr r5, [ARC_REG_DC_CTRL] 1: + +#ifdef CONFIG_ISA_ARCV2 + ; Unaligned access 
is disabled at reset, so re-enable early as + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access + ; by default + lr r5, [status32] + bset r5, r5, STATUS_AD_BIT + kflag r5 +#endif .endm .section .init.text, "ax",@progbits @@ -90,15 +100,13 @@ ENTRY(stext) st.ab 0, [r5, 4] 1: -#ifdef CONFIG_ARC_UBOOT_SUPPORT ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 - ; r1 = magic number (board identity, unused as of now + ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem - ; These are handled later in setup_arch() + ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] st r2, [@uboot_arg] -#endif ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 067ea362fb3e..cf18b3e5a934 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -49,11 +49,13 @@ void arc_init_IRQ(void) *(unsigned int *)&ictrl = 0; +#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ ictrl.save_blink = 1; ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ +#endif WRITE_AUX(AUX_IRQ_CTRL, ictrl); diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index feb90093e6b1..7b2340996cf8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.ret_stk = 4 << bpu.rse; if (cpu->core.family >= 0x54) { - unsigned int exec_ctrl; - READ_BCR(AUX_EXEC_CTRL, exec_ctrl); - cpu->extn.dual_enb = !(exec_ctrl & 1); + struct bcr_uarch_build_arcv2 uarch; - /* dual issue always present for this core */ - cpu->extn.dual = 1; + /* + * The first 0x54 core (uarch maj:min 0:1 or 0:2) was + * dual issue only (HS4x). But next uarch rev (1:0) + * allows it be configured for single issue (HS3x) + * Ensure we fiddle with dual issue only on HS4x + */ + READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); + + if (uarch.prod == 4) { + unsigned int exec_ctrl; + + /* dual issue hardware always present */ + cpu->extn.dual = 1; + + READ_BCR(AUX_EXEC_CTRL, exec_ctrl); + + /* dual issue hardware enabled ? 
*/ + cpu->extn.dual_enb = !(exec_ctrl & 1); + + } } } READ_BCR(ARC_REG_AP_BCR, ap); if (ap.ver) { cpu->extn.ap_num = 2 << ap.num; - cpu->extn.ap_full = !!ap.min; + cpu->extn.ap_full = !ap.min; } READ_BCR(ARC_REG_SMART_BCR, bcr); @@ -462,43 +478,78 @@ void setup_processor(void) arc_chk_core_config(); } -static inline int is_kernel(unsigned long addr) +static inline bool uboot_arg_invalid(unsigned long addr) { - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) - return 1; - return 0; + /* + * Check that it is a untranslated address (although MMU is not enabled + * yet, it being a high address ensures this is not by fluke) + */ + if (addr < PAGE_OFFSET) + return true; + + /* Check that address doesn't clobber resident kernel image */ + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end; } -void __init setup_arch(char **cmdline_p) +#define IGNORE_ARGS "Ignore U-boot args: " + +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */ +#define UBOOT_TAG_NONE 0 +#define UBOOT_TAG_CMDLINE 1 +#define UBOOT_TAG_DTB 2 + +void __init handle_uboot_args(void) { -#ifdef CONFIG_ARC_UBOOT_SUPPORT - /* make sure that uboot passed pointer to cmdline/dtb is valid */ - if (uboot_tag && is_kernel((unsigned long)uboot_arg)) - panic("Invalid uboot arg\n"); - - /* See if u-boot passed an external Device Tree blob */ - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ - if (!machine_desc) -#endif - { - /* No, so try the embedded one */ + bool use_embedded_dtb = true; + bool append_cmdline = false; + + /* check that we know this tag */ + if (uboot_tag != UBOOT_TAG_NONE && + uboot_tag != UBOOT_TAG_CMDLINE && + uboot_tag != UBOOT_TAG_DTB) { + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag); + goto ignore_uboot_args; + } + + if (uboot_tag != UBOOT_TAG_NONE && + uboot_arg_invalid((unsigned long)uboot_arg)) { + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); + goto ignore_uboot_args; + } + + /* see if U-boot passed an external Device Tree blob */ + if (uboot_tag == UBOOT_TAG_DTB) { + machine_desc = setup_machine_fdt((void *)uboot_arg); + + /* external Device Tree blob is invalid - use embedded one */ + use_embedded_dtb = !machine_desc; + } + + if (uboot_tag == UBOOT_TAG_CMDLINE) + append_cmdline = true; + +ignore_uboot_args: + + if (use_embedded_dtb) { machine_desc = setup_machine_fdt(__dtb_start); if (!machine_desc) panic("Embedded DT invalid\n"); + } - /* - * If we are here, it is established that @uboot_arg didn't - * point to DT blob. Instead if u-boot says it is cmdline, - * append to embedded DT cmdline. - * setup_machine_fdt() would have populated @boot_command_line - */ - if (uboot_tag == 1) { - /* Ensure a whitespace between the 2 cmdlines */ - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, uboot_arg, - COMMAND_LINE_SIZE); - } + /* + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this + * append processing can only happen after. 
+ */ + if (append_cmdline) { + /* Ensure a whitespace between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE); } +} + +void __init setup_arch(char **cmdline_p) +{ + handle_uboot_args(); /* Save unparsed command line copy for /proc/cmdline */ *cmdline_p = boot_command_line; diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index d61044dd8b58..ea14b0bf3116 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -25,15 +25,11 @@ #endif #ifdef CONFIG_ARC_HAS_LL64 -# define PREFETCH_READ(RX) prefetch [RX, 56] -# define PREFETCH_WRITE(RX) prefetchw [RX, 64] # define LOADX(DST,RX) ldd.ab DST, [RX, 8] # define STOREX(SRC,RX) std.ab SRC, [RX, 8] # define ZOLSHFT 5 # define ZOLAND 0x1F #else -# define PREFETCH_READ(RX) prefetch [RX, 28] -# define PREFETCH_WRITE(RX) prefetchw [RX, 32] # define LOADX(DST,RX) ld.ab DST, [RX, 4] # define STOREX(SRC,RX) st.ab SRC, [RX, 4] # define ZOLSHFT 4 @@ -41,8 +37,6 @@ #endif ENTRY_CFI(memcpy) - prefetch [r1] ; Prefetch the read location - prefetchw [r0] ; Prefetch the write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy32_64bytes ;; LOOP START LOADX (r6, r1) - PREFETCH_READ (r1) - PREFETCH_WRITE (r3) LOADX (r8, r1) LOADX (r10, r1) LOADX (r4, r1) @@ -117,9 +109,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_1 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 24) or r7, r7, r5 @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_2 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 16) or r7, r7, r5 @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_3 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index f25c085b9874..23e00216e5a5 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" depends on ISA_ARCV2 select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK select RESET_HSDK select HAVE_PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 664e918e2624..26524b75970a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1400,6 +1400,7 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP + select GENERIC_IRQ_MIGRATION help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. 
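Editor's note on the reworked U-boot argument handling in the arch/arc/kernel/setup.c and arch/arc/kernel/head.S hunks above: the boot ABI passes a tag in r0 (0 = no U-boot interaction, 1 = cmdline in r2, 2 = DTB in r2), an always-zero magic in r1, and a pointer in r2, and handle_uboot_args() now rejects unknown tags and pointers that are either translated (below PAGE_OFFSET) or overlap the resident kernel image. The following is only an illustrative sketch of that decision flow under simplified types, not the kernel code itself; the UBOOT_TAG_* values and the validity checks mirror the ones added by the patch.

    /* Illustrative sketch of the U-boot argument validation added above.
     * Not kernel code: parameters stand in for PAGE_OFFSET, _stext and _end.
     */
    #define UBOOT_TAG_NONE     0   /* no U-boot interaction              */
    #define UBOOT_TAG_CMDLINE  1   /* r2 points at an external cmdline   */
    #define UBOOT_TAG_DTB      2   /* r2 points at an external DTB       */

    static int uboot_args_usable(unsigned int tag, unsigned long arg,
                                 unsigned long page_offset,
                                 unsigned long kernel_start,
                                 unsigned long kernel_end)
    {
            /* Unknown tag: ignore whatever U-boot (or a debugger) left in r0/r2. */
            if (tag != UBOOT_TAG_NONE &&
                tag != UBOOT_TAG_CMDLINE &&
                tag != UBOOT_TAG_DTB)
                    return 0;

            /* Nothing passed, nothing to validate. */
            if (tag == UBOOT_TAG_NONE)
                    return 1;

            /* Pointer must be an untranslated (high) address ... */
            if (arg < page_offset)
                    return 0;

            /* ... and must not clobber the resident kernel image. */
            if (arg >= kernel_start && arg <= kernel_end)
                    return 0;

            return 1;
    }

If the arguments fail these checks they are simply ignored and the embedded DTB is used, instead of the old behaviour of panicking; a valid DTB pointer replaces the embedded one, and a valid cmdline is appended to boot_command_line after setup_machine_fdt() has populated it.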
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index b67f5fee1469..dce5be5df97b 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -729,7 +729,7 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &tscadc { diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 172c0224e7f6..b128998097ce 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -651,13 +651,13 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <1>; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <2>; }; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index f3ac7483afed..5d04dc68cf57 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -144,30 +144,32 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "U-Boot"; - reg = <0 0x800000>; - }; - partition@800000 { - label = "Linux"; - reg = <0x800000 0x800000>; - }; - partition@1000000 { - label = "Filesystem"; - reg = <0x1000000 0x3f000000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "U-Boot"; + reg = <0 0x800000>; + }; + partition@800000 { + label = "Linux"; + reg = <0x800000 0x800000>; + }; + partition@1000000 { + label = "Filesystem"; + reg = <0x1000000 0x3f000000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 1139e9469a83..b4cca507cf13 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -160,12 +160,15 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; + + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index bbbb38888bb8..87dcb502f72d 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -81,49 +81,52 @@ }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x000e0000>; - read-only; - }; - - partition@e0000 { - label = "u-boot-env"; - reg = <0x000e0000 0x00020000>; - read-only; - }; - - partition@100000 { - label = "u-boot-env2"; - reg = <0x00100000 0x00020000>; - read-only; - }; - - partition@120000 { - label = "zImage"; - reg = <0x00120000 0x00400000>; - }; - - partition@520000 { - label = "initrd"; - reg = <0x00520000 0x00400000>; - }; - partition@e00000 { - label = "boot"; - reg = <0x00e00000 0x3f200000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + 
partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "u-boot"; + reg = <0x00000000 0x000e0000>; + read-only; + }; + + partition@e0000 { + label = "u-boot-env"; + reg = <0x000e0000 0x00020000>; + read-only; + }; + + partition@100000 { + label = "u-boot-env2"; + reg = <0x00100000 0x00020000>; + read-only; + }; + + partition@120000 { + label = "zImage"; + reg = <0x00120000 0x00400000>; + }; + + partition@520000 { + label = "initrd"; + reg = <0x00520000 0x00400000>; + }; + + partition@e00000 { + label = "boot"; + reg = <0x00e00000 0x3f200000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index cc0c3cf89eaa..592111c8d6fd 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -443,7 +443,7 @@ }; display-controller@6a000000 { - status = "disabled"; + status = "okay"; port@0 { reg = <0>; diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index d5f11d6d987e..bc85b6a166c7 100644 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi @@ -13,10 +13,25 @@ stdout-path = "serial0:115200n8"; }; - memory@80000000 { + /* + * Note that recent version of the device tree compiler (starting with + * version 1.4.2) warn about this node containing a reg property, but + * missing a unit-address. However, the bootloader on these Chromebook + * devices relies on the full name of this node to be exactly /memory. + * Adding the unit-address causes the bootloader to create a /memory + * node and write the memory bank configuration to that node, which in + * turn leads the kernel to believe that the device has 2 GiB of + * memory instead of the amount detected by the bootloader. + * + * The name of this node is effectively ABI and must not be changed. 
+ */ + memory { + device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; }; + /delete-node/ memory@80000000; + host1x@50000000 { hdmi@54280000 { status = "okay"; diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index b9ec44060ed3..a03cf4dfb781 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -212,10 +212,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 3b58300d611c..054aae0edfce 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -93,10 +93,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index fb5d15048c0b..788c17b56ecc 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index b1c334a49cda..710ea309769e 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -25,7 +25,6 @@ #ifndef __ASSEMBLY__ struct irqaction; struct pt_regs; -extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 42aa4a22803c..ae5a0df5316e 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -59,7 +59,6 @@ extern int __put_user_bad(void); * Note that this is actually 0x1,0000,0000 */ #define KERNEL_DS 0x00000000 -#define get_ds() (KERNEL_DS) #ifdef CONFIG_MMU diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -31,7 +31,6 @@ #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> 
-#include <linux/ratelimit.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> @@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) return nr_irqs; } #endif - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. - */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. - */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 16601d1442d1..72cc0862a30e 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -150,7 +150,7 @@ void __show_regs(struct pt_regs *regs) if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; - else if (fs == get_ds()) + else if (fs == KERNEL_DS) segment = "kernel"; else segment = "user"; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -254,7 +254,7 @@ int __cpu_disable(void) /* * OK - migrate IRQs away from this CPU */ - migrate_irqs(); + irq_migrate_all_off_this_cpu(); /* * Flush user cache and TLB mappings, and then remove this CPU diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index 2e1e540f2e5a..d278fb672d40 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c @@ -205,7 +205,6 @@ static struct regulator_init_data mx21ads_lcd_regulator_init_data = { static struct fixed_voltage_config mx21ads_lcd_regulator_pdata = { .supply_name = "LCD", .microvolts = 3300000, - .enable_high = 1, .init_data = &mx21ads_lcd_regulator_init_data, }; diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c index f5e04047ed13..6dd7f57c332f 100644 --- a/arch/arm/mach-imx/mach-mx27ads.c +++ b/arch/arm/mach-imx/mach-mx27ads.c @@ -237,7 +237,7 @@ static struct fixed_voltage_config mx27ads_lcd_regulator_pdata = { static struct gpiod_lookup_table mx27ads_lcd_regulator_gpiod_table = { .dev_id = "reg-fixed-voltage.0", /* Let's hope ID 0 is what we get */ .table = { - GPIO_LOOKUP("LCD", 0, NULL, 
GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c index a04e249c654b..d2560fb1e835 100644 --- a/arch/arm/mach-mmp/brownstone.c +++ b/arch/arm/mach-mmp/brownstone.c @@ -149,7 +149,6 @@ static struct regulator_init_data brownstone_v_5vp_data = { static struct fixed_voltage_config brownstone_v_5vp = { .supply_name = "v_5vp", .microvolts = 5000000, - .enable_high = 1, .enabled_at_boot = 1, .init_data = &brownstone_v_5vp_data, }; diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index c4c0a8ea11e4..be30c3c061b4 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -267,7 +267,6 @@ static struct fixed_voltage_config modem_nreset_config = { .supply_name = "modem_nreset", .microvolts = 3300000, .startup_delay = 25000, - .enable_high = 1, .enabled_at_boot = 1, .init_data = &modem_nreset_data, }; @@ -533,7 +532,6 @@ static struct regulator_init_data keybrd_pwr_initdata = { static struct fixed_voltage_config keybrd_pwr_config = { .supply_name = "keybrd_pwr", .microvolts = 5000000, - .enable_high = 1, .init_data = &keybrd_pwr_initdata, }; diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 8a5b6ed4ec36..a2ecc5e69abb 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -330,7 +330,6 @@ static struct fixed_voltage_config pandora_vwlan = { .supply_name = "vwlan", .microvolts = 1800000, /* 1.8V */ .startup_delay = 50000, /* 50ms */ - .enable_high = 1, .init_data = &pandora_vmmc3, }; diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c index fa8e7dd4d898..4401dfcd7e68 100644 --- a/arch/arm/mach-pxa/cm-x255.c +++ b/arch/arm/mach-pxa/cm-x255.c @@ -98,7 +98,7 @@ static unsigned long cmx255_pin_config[] = { }; #if defined(CONFIG_SPI_PXA2XX) -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index f7081a50dc67..279eeca7add0 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c @@ -313,7 +313,7 @@ static inline void cmx270_init_mmc(void) {} #endif #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master cm_x270_spi_info = { +static struct pxa2xx_spi_controller cm_x270_spi_info = { .num_chipselect = 1, .enable_dma = 1, }; diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index c9732cace5e3..7ecf559bd71c 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -530,7 +530,7 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = { }; #if IS_ENABLED(CONFIG_SPI_PXA2XX) -static struct pxa2xx_spi_master corgi_spi_info = { +static struct pxa2xx_spi_controller corgi_spi_info = { .num_chipselect = 3, }; diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index a24783a03827..524d6093e0c7 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -1065,7 +1065,7 @@ struct platform_device pxa93x_device_gpio = { /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. 
* See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ -void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) +void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info) { struct platform_device *pd; diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 32c1edeb3f14..fa3adb073a0f 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -689,7 +689,7 @@ static inline void em_x270_init_lcd(void) {} #endif #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master em_x270_spi_info = { +static struct pxa2xx_spi_controller em_x270_spi_info = { .num_chipselect = 1, }; @@ -703,7 +703,7 @@ static struct tdo24m_platform_data em_x270_tdo24m_pdata = { .model = TDO35S, }; -static struct pxa2xx_spi_master em_x270_spi_2_info = { +static struct pxa2xx_spi_controller em_x270_spi_2_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -976,7 +976,6 @@ static struct fixed_voltage_config camera_dummy_config = { .supply_name = "camera_vdd", .input_supply = "vcc cam", .microvolts = 2800000, - .enable_high = 0, .init_data = &camera_dummy_initdata, }; diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index 565965e9acc7..5e110e70ce5a 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -714,7 +714,6 @@ static struct regulator_init_data camera_regulator_initdata = { static struct fixed_voltage_config camera_regulator_config = { .supply_name = "camera_vdd", .microvolts = 2800000, - .enable_high = 0, .init_data = &camera_regulator_initdata, }; @@ -730,7 +729,7 @@ static struct gpiod_lookup_table camera_supply_gpiod_table = { .dev_id = "reg-fixed-voltage.1", .table = { GPIO_LOOKUP("gpio-pxa", GPIO50_nCAM_EN, - NULL, GPIO_ACTIVE_HIGH), + NULL, GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index b79b757fdd41..1d6b1d2fb6a9 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/fb.h> +#include <linux/gpio/machine.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/input.h> @@ -629,7 +630,7 @@ static struct spi_board_info tsc2046_board_info[] __initdata = { }, }; -static struct pxa2xx_spi_master pxa_ssp2_master_info = { +static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -702,9 +703,7 @@ static struct regulator_init_data bq24022_init_data = { .consumer_supplies = bq24022_consumers, }; -static struct gpio bq24022_gpios[] = { - { GPIO96_HX4700_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, -}; +static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, @@ -714,12 +713,10 @@ static struct gpio_regulator_state bq24022_states[] = { static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", - .enable_gpio = GPIO72_HX4700_BQ24022_nCHARGE_EN, - .enable_high = 0, .enabled_at_boot = 0, - .gpios = bq24022_gpios, - .nr_gpios = ARRAY_SIZE(bq24022_gpios), + .gflags = bq24022_gpiod_gflags, + .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), @@ -736,6 +733,17 @@ static struct platform_device bq24022 = { }, }; +static struct gpiod_lookup_table bq24022_gpiod_table = { + .dev_id = "gpio-regulator", + .table = { + GPIO_LOOKUP("gpio-pxa", 
GPIO96_HX4700_BQ24022_ISET2, + NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO72_HX4700_BQ24022_nCHARGE_EN, + "enable", GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * StrataFlash */ @@ -878,6 +886,7 @@ static void __init hx4700_init(void) pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); + gpiod_add_lookup_table(&bq24022_gpiod_table); platform_add_devices(devices, ARRAY_SIZE(devices)); pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup)); diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index cbaf4f6edcda..7e30452e3840 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -115,12 +115,12 @@ static struct spi_board_info mcp251x_board_info[] = { } }; -static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = { +static struct pxa2xx_spi_controller pxa_ssp3_spi_master_info = { .num_chipselect = 2, .enable_dma = 1 }; -static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = { +static struct pxa2xx_spi_controller pxa_ssp4_spi_master_info = { .num_chipselect = 2, .enable_dma = 1 }; diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index 39db4898dc4a..464b8bd2bcb9 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -191,7 +191,7 @@ static inline void littleton_init_lcd(void) {}; #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master littleton_spi_info = { +static struct pxa2xx_spi_controller littleton_spi_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index a1391e113ef4..c1bd0d544981 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -197,7 +197,7 @@ static struct platform_device sa1111_device = { * (to J5) and poking board registers (as done below). Else it's only useful * for the temperature sensors. 
*/ -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 08b079653c3f..75abc21083eb 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -645,9 +645,8 @@ static struct regulator_init_data bq24022_init_data = { .consumer_supplies = bq24022_consumers, }; -static struct gpio bq24022_gpios[] = { - { EGPIO_MAGICIAN_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, -}; + +static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, @@ -657,12 +656,10 @@ static struct gpio_regulator_state bq24022_states[] = { static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", - .enable_gpio = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, - .enable_high = 0, .enabled_at_boot = 1, - .gpios = bq24022_gpios, - .nr_gpios = ARRAY_SIZE(bq24022_gpios), + .gflags = bq24022_gpiod_gflags, + .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), @@ -679,6 +676,17 @@ static struct platform_device bq24022 = { }, }; +static struct gpiod_lookup_table bq24022_gpiod_table = { + .dev_id = "gpio-regulator", + .table = { + GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2, + NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, + "enable", GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * fixed regulator for ads7846 */ @@ -932,7 +940,7 @@ struct pxa2xx_spi_chip tsc2046_chip_info = { .gpio_cs = GPIO14_MAGICIAN_TSC2046_CS, }; -static struct pxa2xx_spi_master magician_spi_info = { +static struct pxa2xx_spi_controller magician_spi_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -1027,6 +1035,7 @@ static void __init magician_init(void) regulator_register_always_on(0, "power", pwm_backlight_supply, ARRAY_SIZE(pwm_backlight_supply), 5000000); + gpiod_add_lookup_table(&bq24022_gpiod_table); platform_add_devices(ARRAY_AND_SIZE(devices)); } diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c index ccca9f7575c3..e2e613449660 100644 --- a/arch/arm/mach-pxa/pcm027.c +++ b/arch/arm/mach-pxa/pcm027.c @@ -132,7 +132,7 @@ static struct platform_device smc91x_device = { /* * SPI host and devices */ -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index c2a43d4cfd3e..9450a523cd0b 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -196,7 +196,7 @@ struct platform_device poodle_locomo_device = { EXPORT_SYMBOL(poodle_locomo_device); #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master poodle_spi_info = { +static struct pxa2xx_spi_controller poodle_spi_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index e1db072756f2..e13bfc9b01d2 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -883,7 +883,6 @@ static struct regulator_init_data audio_va_initdata = { static struct fixed_voltage_config audio_va_config = { .supply_name = "audio_va", .microvolts = 5000000, - .enable_high = 1, .enabled_at_boot = 0, .init_data = &audio_va_initdata, }; diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 
306818e2cf54..8dac824a85df 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -572,7 +572,7 @@ static struct spi_board_info spitz_spi_devices[] = { }, }; -static struct pxa2xx_spi_master spitz_spi_info = { +static struct pxa2xx_spi_controller spitz_spi_info = { .num_chipselect = 3, }; diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index e0d6c872270a..c28d19b126a7 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -337,15 +337,15 @@ static struct platform_device stargate2_flash_device = { .num_resources = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_0_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_0_info = { .num_chipselect = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_1_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_1_info = { .num_chipselect = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_2_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_2_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index e8a93c088c35..7439798d58e4 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -813,7 +813,7 @@ static struct platform_device tosa_bt_device = { .dev.platform_data = &tosa_bt_data, }; -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index e2353e75bb28..ad082e11e2a4 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -607,12 +607,12 @@ static struct spi_board_info spi_board_info[] __initdata = { }, }; -static struct pxa2xx_spi_master pxa_ssp1_master_info = { +static struct pxa2xx_spi_controller pxa_ssp1_master_info = { .num_chipselect = 1, .enable_dma = 1, }; -static struct pxa2xx_spi_master pxa_ssp2_master_info = { +static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index c411f79d4cb5..3fd1119c14d5 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -391,7 +391,7 @@ static struct platform_device zeus_sram_device = { }; /* SPI interface on SSP3 */ -static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = { +static struct pxa2xx_spi_controller pxa2xx_spi_ssp3_master_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -426,7 +426,7 @@ static struct gpiod_lookup_table can_regulator_gpiod_table = { .dev_id = "reg-fixed-voltage.0", .table = { GPIO_LOOKUP("gpio-pxa", ZEUS_CAN_SHDN_GPIO, - NULL, GPIO_ACTIVE_HIGH), + NULL, GPIO_ACTIVE_LOW), { }, }, }; @@ -547,7 +547,6 @@ static struct regulator_init_data zeus_ohci_regulator_data = { static struct fixed_voltage_config zeus_ohci_regulator_config = { .supply_name = "vbus2", .microvolts = 5000000, /* 5.0V */ - .enable_high = 1, .startup_delay = 0, .init_data = &zeus_ohci_regulator_data, }; diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index dfa42496ec27..d09c3f236186 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -469,7 +469,6 @@ static struct regulator_consumer_supply assabet_cf_vcc_consumers[] = { static struct fixed_voltage_config assabet_cf_vcc_pdata __initdata = { .supply_name = "cf-power", .microvolts = 3300000, - .enable_high = 1, }; static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = { diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 
f1e2922e447c..1e3e08a1c456 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev) return; arm_teardown_iommu_dma_ops(dev); + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ + set_dma_ops(dev, NULL); } diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, (unsigned char *)optprobe_template_entry, + memcpy(code, (unsigned long *)&optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. */ diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index b0b80c0f09f3..b11bba542fac 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -113,8 +113,7 @@ ENTRY(privcmd_call) /* * Disable userspace access from kernel. This is fine to do it - * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is - * called before. + * unconditionally as no set_fs(KERNEL_DS) is called before. */ uaccess_disable r4 diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index 5b4a9609e31f..2468762283a5 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -351,7 +351,7 @@ reg = <0>; pinctrl-names = "default"; pinctrl-0 = <&cp0_copper_eth_phy_reset>; - reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; + reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>; reset-assert-us = <10000>; }; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 8d41b69ec2da..99bccaac31ad 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -37,7 +37,7 @@ }; memory@86200000 { - reg = <0x0 0x86200000 0x0 0x2600000>; + reg = <0x0 0x86200000 0x0 0x2d00000>; no-map; }; diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S index 021bb9e9784b..706c4e10e9e2 100644 --- a/arch/arm64/crypto/chacha-neon-core.S +++ b/arch/arm64/crypto/chacha-neon-core.S @@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon) mov w3, w2 bl chacha_permute - st1 {v0.16b}, [x1], #16 - st1 {v3.16b}, [x1] + st1 {v0.4s}, [x1], #16 + st1 {v3.4s}, [x1] ldp x29, x30, [sp], #16 ret @@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon) add v3.4s, v3.4s, v19.4s add a2, a2, w8 add a3, a3, w9 +CPU_BE( rev a0, a0 ) +CPU_BE( rev a1, a1 ) +CPU_BE( rev a2, a2 ) +CPU_BE( rev a3, a3 ) ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] @@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon) add v7.4s, v7.4s, v23.4s add a6, a6, w8 add a7, a7, w9 +CPU_BE( rev a4, a4 ) +CPU_BE( rev a5, a5 ) +CPU_BE( rev a6, a6 ) +CPU_BE( rev a7, a7 ) // x8[0-3] += s2[0] // x9[0-3] += s2[1] @@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon) add v11.4s, v11.4s, v27.4s add a10, a10, w8 add a11, a11, w9 +CPU_BE( rev a8, a8 ) +CPU_BE( rev a9, a9 ) +CPU_BE( rev a10, a10 ) +CPU_BE( rev a11, a11 ) // x12[0-3] += s3[0] // x13[0-3] += s3[1] @@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon) add v15.4s, v15.4s, v31.4s add a14, a14, w8 add a15, a15, w9 +CPU_BE( rev a12, a12 ) +CPU_BE( rev a13, a13 ) +CPU_BE( rev a14, a14 ) +CPU_BE( rev a15, a15 ) // interleave 32-bit 
words in state n, n+1 ldp w6, w7, [x2], #64 diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h index 2ba6c6b9541f..71abfc7612b2 100644 --- a/arch/arm64/include/asm/neon-intrinsics.h +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -36,4 +36,8 @@ #include <arm_neon.h> #endif +#ifdef CONFIG_CC_IS_CLANG +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#endif + #endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 547d7a0c9d05..f1e5c9165809 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -34,7 +34,6 @@ #include <asm/memory.h> #include <asm/extable.h> -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 15d79a8e5e5e..eecf7927dab0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -539,8 +539,7 @@ set_hcr: /* GICv3 system register access */ mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #24, #4 - cmp x0, #1 - b.ne 3f + cbz x0, 3f mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..ddaea0fd2fa4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs) } /* - * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a - * We also take into account DIT (bit 24), which is not yet documented, and - * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be - * allocated an EL0 meaning in future. + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. */ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ - GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) static int valid_compat_regs(struct user_pt_regs *regs) { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index d09ec76f08cf..009849328289 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -339,6 +339,9 @@ void __init setup_arch(char **cmdline_p) smp_init_cpus(); smp_build_mpidr_hash(); + /* Init percpu seeds for random tags after cpus are set up. 
*/ + kasan_init_tags(); + #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Make sure init_thread_info.ttbr0 always generates translation diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 4b55b15707a3..f37a86d2a69d 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -252,8 +252,6 @@ void __init kasan_init(void) memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); - kasan_init_tags(); - /* At this point kasan is fully initialized. Enable error messages */ init_task.kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index ffdc4c47ff43..db2640d5f575 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -9,7 +9,6 @@ typedef struct { } mm_segment_t; #define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) -#define get_ds() KERNEL_DS #define USER_DS ((mm_segment_t) { 0x80000000UL }) #define get_fs() (current_thread_info()->addr_limit) diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h index 9adbf7e1aaa3..a407978f9f9f 100644 --- a/arch/h8300/include/asm/segment.h +++ b/arch/h8300/include/asm/segment.h @@ -33,12 +33,6 @@ static inline mm_segment_t get_fs(void) return USER_DS; } -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - #define segment_eq(a, b) ((a).seg == (b).seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 306d469e43da..89782ad3fb88 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -48,7 +48,6 @@ #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. 
access_ok() */ -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index 0b4cc1e079b5..c6686559e9b7 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -45,16 +45,9 @@ static inline void set_fs(mm_segment_t val) : /* no outputs */ : "r" (val.seg) : "memory"); } -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - #else #define USER_DS MAKE_MM_SEG(TASK_SIZE) #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index dbfea093a7c7..bff2a71c828a 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -42,7 +42,6 @@ # define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) # endif -# define get_ds() (KERNEL_DS) # define get_fs() (current_thread_info()->addr_limit) # define set_fs(val) (current_thread_info()->addr_limit = (val)) diff --git a/arch/mips/ath79/dev-spi.h b/arch/mips/ath79/dev-spi.h index d732565ca736..6e15bc8651be 100644 --- a/arch/mips/ath79/dev-spi.h +++ b/arch/mips/ath79/dev-spi.h @@ -13,7 +13,7 @@ #define _ATH79_DEV_SPI_H #include <linux/spi/spi.h> -#include <asm/mach-ath79/ath79_spi_platform.h> +#include <linux/platform_data/spi-ath79.h> void ath79_register_spi(struct ath79_spi_platform_data *pdata, struct spi_board_info const *info, diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 07b4c65a88a4..8e73d65f3480 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = { static int shared_device_registered; +static u64 enet_dmamask = DMA_BIT_MASK(32); + static struct resource enet0_res[] = { { .start = -1, /* filled at runtime */ @@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = { .resource = enet0_res, .dev = { .platform_data = &enet0_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = { .resource = enet1_res, .dev = { .platform_data = &enet1_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = { .resource = enetsw_res, .dev = { .platform_data = &enetsw_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index d43c1dc6ef15..62b298c50905 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -69,7 +69,6 @@ extern u64 __ua_limit; #define USER_DS ((mm_segment_t) { __UA_LIMIT }) #endif -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index 0b9535bc2c53..6b2a4a902a98 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, unsigned long new, unsigned 
int size) { - u32 mask, old32, new32, load32; + u32 mask, old32, new32, load32, load; volatile u32 *ptr32; unsigned int shift; - u8 load; /* Check that ptr is naturally aligned */ WARN_ON((unsigned long)ptr & (size - 1)); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 2ea0ec95efe9..4b5e1f2bfbce 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -86,7 +86,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return -EFAULT; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); @@ -111,7 +111,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, ip -= 4; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); @@ -135,7 +135,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, return -EFAULT; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 149100e1bc7c..6e574c02e4c3 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -212,7 +212,7 @@ void kgdb_call_nmi_hook(void *ignored) mm_segment_t old_fs; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); kgdb_nmicallback(raw_smp_processor_id(), NULL); @@ -318,7 +318,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, /* Kernel mode. Set correct address limit */ old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); if (atomic_read(&kgdb_active) != -1) kgdb_nmicallback(smp_processor_id(), regs); diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8c6c48ed786a..d2e5a5ad0e6f 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -384,7 +384,8 @@ static void __init bootmem_init(void) init_initrd(); reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); - memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT); + memblock_reserve(PHYS_OFFSET, + (reserved_end << PAGE_SHIFT) - PHYS_OFFSET); /* * max_low_pfn is not a number of pages. The number of pages diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c91097f7b32f..cbab46004e99 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1077,7 +1077,7 @@ asmlinkage void do_tr(struct pt_regs *regs) seg = get_fs(); if (!user_mode(regs)) - set_fs(get_ds()); + set_fs(KERNEL_DS); prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c index 577ec81b557d..3deab9a77718 100644 --- a/arch/mips/lantiq/xway/vmmc.c +++ b/arch/mips/lantiq/xway/vmmc.c @@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev) dma_addr_t dma; cp1_base = - (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE, - &dma, GFP_ATOMIC)); + (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE, + &dma, GFP_KERNEL)); gpio_count = of_gpio_count(pdev->dev.of_node); while (gpio_count > 0) { diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..0effd3cba9a7 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -79,8 +79,6 @@ enum reg_val_type { REG_64BIT_32BIT, /* 32-bit compatible, need truncation for 64-bit ops. */ REG_32BIT, - /* 32-bit zero extended. */ - REG_32BIT_ZERO_EX, /* 32-bit no sign/zero extension needed. 
*/ REG_32BIT_POS }; @@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) const struct bpf_prog *prog = ctx->skf; int stack_adjust = ctx->stack_size; int store_offset = stack_adjust - 8; + enum reg_val_type td; int r0 = MIPS_R_V0; - if (dest_reg == MIPS_R_RA && - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) + if (dest_reg == MIPS_R_RA) { /* Don't let zero extended value escape. */ - emit_instr(ctx, sll, r0, r0, 0); + td = get_reg_val_type(ctx, prog->len, BPF_REG_0); + if (td == REG_64BIT) + emit_instr(ctx, sll, r0, r0, 0); + } if (ctx->flags & EBPF_SAVE_RA) { emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); @@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) + if (td == REG_64BIT) /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); if (insn->imm == 1) { @@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (src < 0 || dst < 0) return -EINVAL; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } did_move = false; ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { + if (ts == REG_64BIT) { int tmp_reg = MIPS_R_AT; if (bpf_op == BPF_MOV) { @@ -1254,8 +1255,7 @@ jeq_common: if (insn->imm == 64 && td == REG_32BIT) emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - if (insn->imm != 64 && - (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { + if (insn->imm != 64 && td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -1819,7 +1819,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* Update the icache */ flush_icache_range((unsigned long)ctx.target, - (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); + (unsigned long)&ctx.target[ctx.idx]); if (bpf_jit_enable > 1) /* Dump JIT code */ diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 53dcb49b0b12..116598b47c4d 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -37,7 +37,6 @@ extern int fixup_exception(struct pt_regs *regs); #define KERNEL_DS ((mm_segment_t) { ~0UL }) #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define user_addr_max get_fs diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index ab7ab46234b1..9712fd474f2c 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -121,7 +121,7 @@ void show_regs(struct pt_regs *regs) regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); pr_info(" IRQs o%s Segment %s\n", interrupts_enabled(regs) 
? "n" : "ff", - segment_eq(get_fs(), get_ds())? "kernel" : "user"); + segment_eq(get_fs(), KERNEL_DS)? "kernel" : "user"); } EXPORT_SYMBOL(show_regs); diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index e0ea10806491..e83f831a76f9 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -26,7 +26,6 @@ #define USER_DS MAKE_MM_SEG(0x80000000UL) #define KERNEL_DS MAKE_MM_SEG(0) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index a44682c8adc3..45afd9ab78c1 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -42,7 +42,6 @@ */ #define KERNEL_DS (~0UL) -#define get_ds() (KERNEL_DS) #define USER_DS (TASK_SIZE) #define get_fs() (current_thread_info()->addr_limit) diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 30ac2865ea73..ebbb9ffe038c 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -16,7 +16,6 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529b..0964c236e3e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + int rc = tracehook_report_syscall_entry(regs); + /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip - * the system call and the system call restart handling. + * As tracesys_next does not set %r28 to -ENOSYS + * when %r20 is set to -1, initialize it here. */ - regs->gr[20] = -1UL; - goto out; + regs->gr[28] = -ENOSYS; + + if (rc) { + /* + * A nonzero return code from + * tracehook_report_syscall_entry() tells us + * to prevent the syscall execution. Skip + * the syscall call and the syscall restart handling. + * + * Note that the tracer may also just change + * regs->gr[20] to an invalid syscall number, + * that is handled by tracesys_next. + */ + regs->gr[20] = -1UL; + return -1; + } } /* Do the secure computing check after ptrace. 
*/ @@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); -out: /* * Sign extend the syscall number to 64bit since it may have been * modified by a compat ptrace call diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index e3a731793ea2..4d6d905e9138 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -28,7 +28,6 @@ #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) #endif -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7db3119f8a5b..145373f0e5dc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pnv_pci_ioda2_setup_dma_pe(phb, pe); #ifdef CONFIG_IOMMU_API + iommu_register_group(&pe->table_group, + pe->phb->hose->global_number, pe->pe_number); pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); #endif } diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 45fb70b4bfa7..ef9448a907c6 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, return 0; pe = &phb->ioda.pe_array[pdn->pe_number]; + if (!pe->table_group.group) + return 0; iommu_add_device(&pe->table_group, dev); return 0; case BUS_NOTIFY_DEL_DEVICE: diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 637b896894fc..a00168b980d2 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -41,7 +41,6 @@ #define KERNEL_DS (~0UL) #define USER_DS (TASK_SIZE) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index bd2545977ad3..007fcb9aeeb8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -31,7 +31,6 @@ #define USER_DS (2) #define USER_DS_SACF (3) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.mm_segment) #define segment_eq(a,b) (((a) & 2) == ((b) & 2)) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a153257bf7d9..d62fa148558b 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->crycbd = 0; apie_h = vcpu->arch.sie_block->eca & ECA_APIE; - if (!apie_h && !key_msk) + if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0)) return 0; if (!crycb_addr) diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 22b4106b8084..5495efa07335 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -630,7 +630,6 @@ static struct regulator_init_data cn12_power_init_data = { static struct fixed_voltage_config cn12_power_info = { .supply_name = "CN12 SD/MMC Vdd", .microvolts = 3300000, - .enable_high = 1, .init_data = &cn12_power_init_data, }; @@ -671,7 +670,6 @@ static struct regulator_init_data sdhi0_power_init_data = { static struct fixed_voltage_config sdhi0_power_info = { .supply_name = "CN11 SD/MMC Vdd", .microvolts = 3300000, - .enable_high = 1, .init_data = 
&sdhi0_power_init_data, }; diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile index 01d0f7fb14cc..2563d1e532e2 100644 --- a/arch/sh/boot/dts/Makefile +++ b/arch/sh/boot/dts/Makefile @@ -1,3 +1,3 @@ ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") -obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o +obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o endif diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 101c13c0c6ad..33d1d28057cb 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -26,7 +26,6 @@ typedef struct { #define segment_eq(a, b) ((a).seg == (b).seg) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 5153798051fb..d6d8413eca83 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -25,7 +25,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { -1 }) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 87ae9ffb1521..bf9d330073b2 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -31,7 +31,6 @@ #define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define get_ds() (KERNEL_DS) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 705dafc2d11a..2bdbbbcfa393 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -841,7 +841,7 @@ union hv_gpa_page_range { * count is equal with how many entries of union hv_gpa_page_range can * be populated into the input parameter page. 
*/ -#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \ +#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \ sizeof(union hv_gpa_page_range)) struct hv_guest_mapping_flush_list { diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4660ce90de7f..180373360e34 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -299,6 +299,7 @@ union kvm_mmu_extended_role { unsigned int cr4_smap:1; unsigned int cr4_smep:1; unsigned int cr4_la57:1; + unsigned int maxphyaddr:6; }; }; @@ -397,6 +398,7 @@ struct kvm_mmu { void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte); hpa_t root_hpa; + gpa_t root_cr3; union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 780f2b42c8ef..5e49a0acb5ee 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -25,7 +25,6 @@ #define KERNEL_DS MAKE_MM_SEG(-1UL) #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) static inline void set_fs(mm_segment_t fs) { @@ -284,7 +283,7 @@ do { \ __put_user_goto(x, ptr, "l", "k", "ir", label); \ break; \ case 8: \ - __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \ + __put_user_goto_u64(x, ptr, label); \ break; \ default: \ __put_user_bad(); \ @@ -431,8 +430,10 @@ do { \ ({ \ __label__ __pu_label; \ int __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __pu_val; \ + __pu_val = x; \ __uaccess_begin(); \ - __put_user_size((x), (ptr), (size), __pu_label); \ + __put_user_size(__pu_val, (ptr), (size), __pu_label); \ __pu_err = 0; \ __pu_label: \ __uaccess_end(); \ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index bbffa6c54697..c07958b59f50 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; + unsigned f_la57 = 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, // TSC_ADJUST is emulated entry->ebx |= F(TSC_ADJUST); entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; + f_la57 = entry->ecx & F(LA57); cpuid_mask(&entry->ecx, CPUID_7_ECX); + /* Set LA57 based on hardware capability. */ + entry->ecx |= f_la57; entry->ecx |= f_umip; /* PKU is not yet implemented for shadow paging. 
*/ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index da9c42349b1f..f2d1d230d5b8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, &invalid_list); mmu->root_hpa = INVALID_PAGE; } + mmu->root_cr3 = 0; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); @@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); } else BUG(); + vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); return 0; } @@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; u64 pdptr, pm_mask; - gfn_t root_gfn; + gfn_t root_gfn, root_cr3; int i; - root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; + root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); + root_gfn = root_cr3 >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; @@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu->root_hpa = root; - return 0; + goto set_root_cr3; } /* @@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); } +set_root_cr3: + vcpu->arch.mmu->root_cr3 = root_cr3; + return 0; } @@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, struct kvm_mmu_root_info root; struct kvm_mmu *mmu = vcpu->arch.mmu; - root.cr3 = mmu->get_cr3(vcpu); + root.cr3 = mmu->root_cr3; root.hpa = mmu->root_hpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { @@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, } mmu->root_hpa = root.hpa; + mmu->root_cr3 = root.cr3; return i < KVM_MMU_NUM_PREV_ROOTS; } @@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) ext.cr4_pse = !!is_pse(vcpu); ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); + ext.maxphyaddr = cpuid_maxphyaddr(vcpu); ext.valid = 1; @@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.root_cr3 = 0; vcpu->arch.root_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.root_cr3 = 0; vcpu->arch.guest_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 6521134057e8..3c4568f8fb28 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -117,67 +117,12 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_fprestore); -/* Helper to check whether a uaccess fault indicates a kernel bug. */ -static bool bogus_uaccess(struct pt_regs *regs, int trapnr, - unsigned long fault_addr) -{ - /* This is the normal case: #PF with a fault address in userspace. */ - if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX) - return false; - - /* - * This code can be reached for machine checks, but only if the #MC - * handler has already decided that it looks like a candidate for fixup. 
- * This e.g. happens when attempting to access userspace memory which - * the CPU can't access because of uncorrectable bad memory. - */ - if (trapnr == X86_TRAP_MC) - return false; - - /* - * There are two remaining exception types we might encounter here: - * - #PF for faulting accesses to kernel addresses - * - #GP for faulting accesses to noncanonical addresses - * Complain about anything else. - */ - if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) { - WARN(1, "unexpected trap %d in uaccess\n", trapnr); - return false; - } - - /* - * This is a faulting memory access in kernel space, on a kernel - * address, in a usercopy function. This can e.g. be caused by improper - * use of helpers like __put_user and by improper attempts to access - * userspace addresses in KERNEL_DS regions. - * The one (semi-)legitimate exception are probe_kernel_{read,write}(), - * which can be invoked from places like kgdb, /dev/mem (for reading) - * and privileged BPF code (for reading). - * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag - * to tell us that faulting on kernel addresses, and even noncanonical - * addresses, in a userspace accessor does not necessarily imply a - * kernel bug, root might just be doing weird stuff. - */ - if (current->kernel_uaccess_faults_ok) - return false; - - /* This is bad. Refuse the fixup so that we go into die(). */ - if (trapnr == X86_TRAP_PF) { - pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n", - fault_addr); - } else { - pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n"); - } - return true; -} - __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; + WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?"); regs->ip = ex_fixup_addr(fixup); return true; } @@ -188,8 +133,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; /* Special hack for uaccess_err */ current->thread.uaccess_err = 1; regs->ip = ex_fixup_addr(fixup); diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c index 96f438d4b026..1421d5330b2c 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c @@ -44,7 +44,6 @@ static struct fixed_voltage_config bcm43xx_vmmc = { */ .microvolts = 2000000, /* 1.8V */ .startup_delay = 250 * 1000, /* 250ms */ - .enable_high = 1, /* active high */ .enabled_at_boot = 0, /* disabled at boot */ .init_data = &bcm43xx_vmmc_data, }; diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h index dfdf9fae1f84..7f6cf4151843 100644 --- a/arch/xtensa/include/asm/asm-uaccess.h +++ b/arch/xtensa/include/asm/asm-uaccess.h @@ -32,8 +32,6 @@ #define KERNEL_DS 0 #define USER_DS 1 -#define get_ds (KERNEL_DS) - /* * get_fs reads current->thread.current_ds into a register. 
* On Entry: diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 4b2480304bc3..6792928ba84a 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -32,7 +32,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { 1 }) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 17eb09d222ff..ec78a04eb136 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) int af_alg_release(struct socket *sock) { - if (sock->sk) + if (sock->sk) { sock_put(sock->sk); + sock->sk = NULL; + } return 0; } EXPORT_SYMBOL_GPL(af_alg_release); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 0ea2139c50d8..ccd296dbb95c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - hrtimer_cancel(&dev->power.suspend_timer); + hrtimer_try_to_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 2e8f0144f9ab..9cbb4b0cd01b 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -33,7 +33,7 @@ struct regcache_rbtree_node { unsigned int blklen; /* the actual rbtree node holding this block */ struct rb_node node; -} __attribute__ ((packed)); +}; struct regcache_rbtree_ctx { struct rb_root root; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 330c1f7e9665..5059748afd4c 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -35,6 +35,7 @@ struct regmap_irq_chip_data { int wake_count; void *status_reg_buf; + unsigned int *main_status_buf; unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; @@ -329,6 +330,33 @@ static const struct irq_chip regmap_irq_chip = { .irq_set_wake = regmap_irq_set_wake, }; +static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, + unsigned int b) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + struct regmap_irq_sub_irq_map *subreg; + int i, ret = 0; + + if (!chip->sub_reg_offsets) { + /* Assume linear mapping */ + ret = regmap_read(map, chip->status_base + + (b * map->reg_stride * data->irq_reg_stride), + &data->status_buf[b]); + } else { + subreg = &chip->sub_reg_offsets[b]; + for (i = 0; i < subreg->num_regs; i++) { + unsigned int offset = subreg->offset[i]; + + ret = regmap_read(map, chip->status_base + offset, + &data->status_buf[offset]); + if (ret) + break; + } + } + return ret; +} + static irqreturn_t regmap_irq_thread(int irq, void *d) { struct regmap_irq_chip_data *data = d; @@ -352,11 +380,65 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } /* - * Read in the statuses, using a single bulk read if possible - * in order to reduce the I/O overheads. + * Read only registers with active IRQs if the chip has 'main status + * register'. Else read in the statuses, using a single bulk read if + * possible in order to reduce the I/O overheads. 
*/ - if (!map->use_single_read && map->reg_stride == 1 && - data->irq_reg_stride == 1) { + + if (chip->num_main_regs) { + unsigned int max_main_bits; + unsigned long size; + + size = chip->num_regs * sizeof(unsigned int); + + max_main_bits = (chip->num_main_status_bits) ? + chip->num_main_status_bits : chip->num_regs; + /* Clear the status buf as we don't read all status regs */ + memset(data->status_buf, 0, size); + + /* We could support bulk read for main status registers + * but I don't expect to see devices with really many main + * status registers so let's only support single reads for the + * sake of simplicity. and add bulk reads only if needed + */ + for (i = 0; i < chip->num_main_regs; i++) { + ret = regmap_read(map, chip->main_status + + (i * map->reg_stride + * data->irq_reg_stride), + &data->main_status_buf[i]); + if (ret) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + goto exit; + } + } + + /* Read sub registers with active IRQs */ + for (i = 0; i < chip->num_main_regs; i++) { + unsigned int b; + const unsigned long mreg = data->main_status_buf[i]; + + for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { + if (i * map->format.val_bytes * 8 + b > + max_main_bits) + break; + ret = read_sub_irq_data(data, b); + + if (ret != 0) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + if (chip->runtime_pm) + pm_runtime_put(map->dev); + goto exit; + } + } + + } + } else if (!map->use_single_read && map->reg_stride == 1 && + data->irq_reg_stride == 1) { + u8 *buf8 = data->status_reg_buf; u16 *buf16 = data->status_reg_buf; u32 *buf32 = data->status_reg_buf; @@ -521,6 +603,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; + if (chip->num_main_regs) { + d->main_status_buf = kcalloc(chip->num_main_regs, + sizeof(unsigned int), + GFP_KERNEL); + + if (!d->main_status_buf) + goto err_alloc; + } + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->status_buf) diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 2fe225a697df..3487e03d4bc6 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, - nck(at91sam9x5_systemck), - nck(at91sam9x35_periphck), 0); + nck(at91sam9x5_systemck), 31, 0); if (!at91sam9x5_pmc) return; @@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 2; i++) { char name[6]; diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index d69ad96fe988..cd0ef7274fdb 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 3; i++) { char name[6]; @@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; parent_names[5] = "audiopll_pmcck"; for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, diff --git 
a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index e358be7f6c8d..b645a9d59cdb 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 3; i++) { char name[6]; diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 3b97f60540ad..609970c0b666 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1", static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", 0x060, BIT(10), 0); static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", - 0x060, BIT(12), 0); + 0x060, BIT(11), 0); static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", - 0x060, BIT(13), 0); + 0x060, BIT(12), 0); static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", 0x060, BIT(13), 0); static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 621b1cd996db..ac12f261f8ca 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, [RST_BUS_VE] = { 0x2c4, BIT(0) }, - [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(4) }, [RST_BUS_CSI] = { 0x2c4, BIT(8) }, [RST_BUS_DE] = { 0x2c4, BIT(12) }, [RST_BUS_DBG] = { 0x2c4, BIT(31) }, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 242c3370544e..9ed46d188cb5 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - kfree(priv); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + kfree(priv); return 0; } diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index f62624357020..907a6db4d6c0 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) return 0; } -static void cc_pm_go(struct cc_drvdata *drvdata) {} +static inline void cc_pm_go(struct cc_drvdata *drvdata) {} static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index 00e954f22bc9..74401e0adb29 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c @@ -30,6 +30,7 @@ #define GPIO_REG_EDGE 0xA0 struct mtk_gc { + struct irq_chip irq_chip; struct gpio_chip chip; spinlock_t lock; int bank; @@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip mediatek_gpio_irq_chip = { - .irq_unmask = mediatek_gpio_irq_unmask, - .irq_mask = mediatek_gpio_irq_mask, - .irq_mask_ack = mediatek_gpio_irq_mask, - .irq_set_type = mediatek_gpio_irq_type, -}; - static int mediatek_gpio_xlate(struct gpio_chip *chip, const struct of_phandle_args *spec, u32 *flags) @@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } + rg->irq_chip.name = dev_name(dev); + rg->irq_chip.parent_device = dev; + rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; + 
rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; + rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; + rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; + if (mtk->gpio_irq) { /* * Manually request the irq here instead of passing @@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } - ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, + ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add gpiochip_irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, + gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip, mtk->gpio_irq, NULL); } @@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev) mtk->gpio_irq = irq_of_parse_and_map(np, 0); mtk->dev = dev; platform_set_drvdata(pdev, mtk); - mediatek_gpio_irq_chip.name = dev_name(dev); for (i = 0; i < MTK_BANK_CNT; i++) { ret = mediatek_gpio_bank_probe(dev, np, i); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index e9600b556f39..bcc6be4a5cb2 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void) { switch (gpio_type) { case PXA3XX_GPIO: + case MMP2_GPIO: return false; default: diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a6e1891217e2..c34eb9d9c59a 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -125,7 +125,7 @@ static void of_gpio_flags_quirks(struct device_node *np, for_each_child_of_node(np, child) { ret = of_property_read_u32(child, "reg", &cs); - if (!ret) + if (ret) continue; if (cs == index) { /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bc62bf41b7e9..5dc349173e4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (amdgpu_device_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index aadd0fa42e43..3aa42c64484a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -405,6 +405,7 @@ struct amdgpu_crtc { struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; + u64 last_flip_vblank; /* pll sharing */ struct amdgpu_atom_ss ss; bool ss_enabled; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7c108e687683..698bcb8ce61d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct ttm_bo_global *glob = adev->mman.bdev.glob; struct amdgpu_vm_bo_base *bo_base; +#if 0 if (vm->bulk_moveable) { spin_lock(&glob->lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); spin_unlock(&glob->lru_lock); return; } +#endif memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 6811a5d05b27..aa2f71cc1eba 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -128,7 +128,7 @@ static const struct 
soc15_reg_golden golden_settings_sdma0_4_2_init[] = { static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = { - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), @@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0b392bfca284..636d14a60952 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params) return; } + /* Update to correct count(s) if racing with vblank irq */ + amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); /* wake up userspace */ if (amdgpu_crtc->event) { - /* Update to correct count(s) if racing with vblank irq */ - drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); - drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); /* page flip completed. clean up */ @@ -786,12 +785,13 @@ static int dm_suspend(void *handle) struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; + WARN_ON(adev->dm.cached_state); + adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); + s3_handle_mst(adev->ddev, true); amdgpu_dm_irq_suspend(adev); - WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); @@ -4827,6 +4827,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); int planes_count = 0; unsigned long flags; + u64 last_flip_vblank; + bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { @@ -4858,6 +4860,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* In commit tail framework this cannot happen */ WARN_ON(1); } + + /* For variable refresh rate mode only: + * Get vblank of last completed flip to avoid > 1 vrr flips per + * video frame by use of throttling, but allow flip programming + * anywhere in the possibly large variable vrr vblank interval + * for fine-grained flip timing control and more opportunity to + * avoid stutter on late submission of amdgpu_dm_do_flip() calls. 
+ */ + last_flip_vblank = acrtc_attach->last_flip_vblank; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) { @@ -4881,10 +4893,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (plane->type == DRM_PLANE_TYPE_PRIMARY) drm_crtc_vblank_get(crtc); + /* Use old throttling in non-vrr fixed refresh rate mode + * to keep flip scheduling based on target vblank counts + * working in a backwards compatible way, e.g., clients + * using GLX_OML_sync_control extension. + */ + if (!vrr_active) + last_flip_vblank = drm_crtc_vblank_count(crtc); + amdgpu_dm_do_flip( crtc, fb, - (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, + (uint32_t) last_flip_vblank + *wait_for_vblank, dc_state); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 19801bdba0d2..7a72ee46f14b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -662,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; + int patched_disp_clk = context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -671,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } - if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { - context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); - clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { + context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); + clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index acd418515346..a6b80fdaa666 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -37,6 +37,10 @@ void dce100_prepare_bandwidth( struct dc *dc, struct dc_state *context); +void dce100_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); + bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, enum pipe_gating_control power_gating); diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index a60a90e68d91..c4543178ba20 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc) dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; - dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index cdd1d6b7b9f2..4e9ea50141bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -790,9 +790,22 @@ bool dce80_validate_bandwidth( struct dc *dc, struct dc_state *context) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw.dce.dispclk_khz = 681000; + context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + } else { + context->bw.dce.dispclk_khz = 0; + context->bw.dce.yclk_khz = 0; + } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 58a12ddf12f3..41883c981789 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2658,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .mirror = pipe_ctx->plane_state->horizontal_mirror }; - pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; - pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; + pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x; + pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y; if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index f3dd66ae990a..aa35007262cd 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -154,6 +154,10 @@ static int bochs_pci_probe(struct pci_dev *pdev, if (IS_ERR(dev)) return PTR_ERR(dev); + ret = pci_enable_device(pdev); + if (ret) + goto err_free_dev; + dev->pdev = pdev; pci_set_drvdata(pdev, dev); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 54e2ae614dcc..f4290f6b0c38 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1602,6 +1602,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev, old_plane_state->crtc != new_plane_state->crtc) return -EINVAL; + /* + * FIXME: Since prepare_fb and cleanup_fb are always called on + * the new_plane_state for async updates we need to block framebuffer + * changes. This prevents use of a fb that's been cleaned up and + * double cleanups from occuring. 
+ */ + if (old_plane_state->fb != new_plane_state->fb) + return -EINVAL; + funcs = plane->helper_private; if (!funcs->atomic_async_update) return -EINVAL; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 7f365ac0b549..4ee16b264dbe 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, bool *enabled, int width, int height) { struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); - unsigned long conn_configured, conn_seq, mask; unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); + unsigned long conn_configured, conn_seq; int i, j; bool *save_enabled; bool fallback = true, ret = true; @@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, drm_modeset_backoff(&ctx); memcpy(save_enabled, enabled, count); - mask = GENMASK(count - 1, 0); + conn_seq = GENMASK(count - 1, 0); conn_configured = 0; retry: - conn_seq = conn_configured; for (i = 0; i < count; i++) { struct drm_fb_helper_connector *fb_conn; struct drm_connector *connector; @@ -371,7 +370,8 @@ retry: if (conn_configured & BIT(i)) continue; - if (conn_seq == 0 && !connector->has_tile) + /* First pass, only consider tiled connectors */ + if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) continue; if (connector->status == connector_status_connected) @@ -475,8 +475,10 @@ retry: conn_configured |= BIT(i); } - if ((conn_configured & mask) != mask && conn_configured != conn_seq) + if (conn_configured != conn_seq) { /* repeat until no more are found */ + conn_seq = conn_configured; goto retry; + } /* * If the BIOS didn't enable everything it could, fall back to have the diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index dec1e081f529..6a8fb6fd183c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (radeon_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index bb03079fbade..59279224e07f 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -602,6 +602,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m) ret = dw_i2c_clk_cfg(master); if (ret) return ret; + /* fall through */ case I3C_BUS_MODE_PURE: ret = dw_i3c_clk_cfg(master); if (ret) diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index c13c0ba30f63..d499cd61c0e8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, static int c4iw_rdev_open(struct c4iw_rdev *rdev) { int err; + unsigned int factor; c4iw_init_dev_ucontext(rdev, &rdev->uctx); @@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) return -EINVAL; } - rdev->qpmask = rdev->lldi.udb_density - 1; - rdev->cqmask = rdev->lldi.ucq_density - 1; + /* This implementation requires a sge_host_page_size <= PAGE_SIZE. 
*/ + if (rdev->lldi.sge_host_page_size > PAGE_SIZE) { + pr_err("%s: unsupported sge host page size %u\n", + pci_name(rdev->lldi.pdev), + rdev->lldi.sge_host_page_size); + return -EINVAL; + } + + factor = PAGE_SIZE / rdev->lldi.sge_host_page_size; + rdev->qpmask = (rdev->lldi.udb_density * factor) - 1; + rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1; + pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 31d91538bbf4..694324b37480 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_rdma_ch *ch; - int i, j; u8 status; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); @@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) if (status) return FAILED; - for (i = 0; i < target->ch_count; i++) { - ch = &target->ch[i]; - for (j = 0; j < target->req_ring_size; ++j) { - struct srp_request *req = &ch->req_ring[j]; - - srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); - } - } - return SUCCESS; } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index dc9f14811e0f..58dc70bffd5b 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) for (tmp = dev; tmp; tmp = tmp->bus->self) level++; - size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path); + size = sizeof(*info) + level * sizeof(info->path[0]); if (size <= sizeof(dmar_pci_notify_info_buf)) { info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf; } else { diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..a64116586b4c 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Clear ring flush state */ timeout = 1000; /* timeout of 1s */ - writel_relaxed(0x0, ring + RING_CONTROL); + writel_relaxed(0x0, ring->regs + RING_CONTROL); do { - if (!(readl_relaxed(ring + RING_FLUSH_DONE) & + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK)) break; mdelay(1); diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..38d9df3fb199 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) return ret; } +EXPORT_SYMBOL_GPL(mbox_flush); /** * mbox_request_channel - Request a mailbox channel. 
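The bcm-flexrm-mailbox hunk above swaps the base of the RING_CONTROL and RING_FLUSH_DONE accesses from ring to ring->regs. That one-word change matters because C scales pointer arithmetic by the pointee size: ring + RING_FLUSH_DONE steps RING_FLUSH_DONE whole ring descriptors past the struct, while ring->regs + RING_FLUSH_DONE is the intended byte offset into the mapped register window. A minimal userspace sketch of the same pitfall follows; the struct layout, offsets, and names are made up for illustration and are not the driver's real flexrm_ring types.

#include <stdio.h>
#include <stdint.h>

#define RING_CONTROL	0x0	/* hypothetical byte offsets into the register window */
#define RING_FLUSH_DONE	0x8

static unsigned char mmio[4096];	/* stand-in for the mapped register window */

struct demo_ring {
	void *regs;		/* register base, byte-addressed */
	int num;
	uint64_t stats[16];
};

int main(void)
{
	struct demo_ring rings[16] = { [0] = { .regs = mmio } };
	struct demo_ring *ring = &rings[0];

	/* Buggy form: scales by sizeof(struct demo_ring), lands inside rings[8]. */
	printf("ring       + RING_FLUSH_DONE -> %p\n",
	       (void *)(ring + RING_FLUSH_DONE));

	/* Fixed form: a plain byte offset from the register base, as in the hunk. */
	printf("ring->regs + RING_FLUSH_DONE -> %p\n",
	       (void *)((unsigned char *)ring->regs + RING_FLUSH_DONE));

	return 0;
}

Printing both addresses makes the bug obvious: the first lands hundreds of bytes away from the register window, the second eight bytes into it.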
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index abba078f7f49..95ffe008ebdf 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -8,7 +8,7 @@ mmc_core-y := core.o bus.o host.o \ mmc.o mmc_ops.o sd.o sd_ops.o \ sdio.o sdio_ops.o sdio_bus.o \ sdio_cis.o sdio_io.o sdio_irq.o \ - slot-gpio.o + slot-gpio.o regulator.o mmc_core-$(CONFIG_OF) += pwrseq.o obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 14f3fdb8c6bb..2c71a434c915 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1124,7 +1124,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = md->queue.card; - unsigned int from, nr, arg; + unsigned int from, nr; int err = 0, type = MMC_BLK_DISCARD; blk_status_t status = BLK_STS_OK; @@ -1136,24 +1136,18 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) from = blk_rq_pos(req); nr = blk_rq_sectors(req); - if (mmc_can_discard(card)) - arg = MMC_DISCARD_ARG; - else if (mmc_can_trim(card)) - arg = MMC_TRIM_ARG; - else - arg = MMC_ERASE_ARG; do { err = 0; if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, - arg == MMC_TRIM_ARG ? + card->erase_arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, 0); } if (!err) - err = mmc_erase(card, from, nr, arg); + err = mmc_erase(card, from, nr, card->erase_arg); } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); if (err) status = BLK_STS_IOERR; @@ -2380,12 +2374,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%u%s", card->host->index, subname ? 
subname : ""); - if (mmc_card_mmc(card)) - blk_queue_logical_block_size(md->queue.queue, - card->ext_csd.data_sector_size); - else - blk_queue_logical_block_size(md->queue.queue, 512); - set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { @@ -2774,8 +2762,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) return ret; } -DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, - NULL, "%08llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, + NULL, "%08llx\n"); /* That is two digits * 512 + 1 for newline */ #define EXT_CSD_STR_LEN 1025 @@ -2863,8 +2851,9 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) if (mmc_card_mmc(card) || mmc_card_sd(card)) { md->status_dentry = - debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops); + debugfs_create_file_unsafe("status", 0400, root, + card, + &mmc_dbg_card_status_fops); if (!md->status_dentry) return -EIO; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5bd58b95d318..6db36dc870b5 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -21,7 +21,6 @@ #include <linux/leds.h> #include <linux/scatterlist.h> #include <linux/log2.h> -#include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeup.h> #include <linux/suspend.h> @@ -52,6 +51,7 @@ /* The max erase timeout, used when host->max_busy_timeout isn't specified */ #define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */ +#define SD_DISCARD_TIMEOUT_MS (250) static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; @@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host, if (!data) return; - if (cmd->error || data->error || + if ((cmd && cmd->error) || data->error || !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) return; @@ -758,33 +758,6 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) } EXPORT_SYMBOL(mmc_set_data_timeout); -/** - * mmc_align_data_size - pads a transfer size to a more optimal value - * @card: the MMC card associated with the data transfer - * @sz: original transfer size - * - * Pads the original data size with a number of extra bytes in - * order to avoid controller bugs and/or performance hits - * (e.g. some controllers revert to PIO for certain sizes). - * - * Returns the improved size, which might be unmodified. - * - * Note that this function is only relevant when issuing a - * single scatter gather entry. - */ -unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) -{ - /* - * FIXME: We don't have a system for the controller to tell - * the core about its problems yet, so for now we just 32-bit - * align the size. - */ - sz = ((sz + 3) / 4) * 4; - - return sz; -} -EXPORT_SYMBOL(mmc_align_data_size); - /* * Allow claiming an already claimed host if the context is the same or there is * no context but the task is the same. @@ -1112,55 +1085,6 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) return mask; } -EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); - -#ifdef CONFIG_OF - -/** - * mmc_of_parse_voltage - return mask of supported voltages - * @np: The device node need to be parsed. - * @mask: mask of voltages available for MMC/SD/SDIO - * - * Parse the "voltage-ranges" DT property, returning zero if it is not - * found, negative errno if the voltage-range specification is invalid, - * or one if the voltage-range is specified and successfully parsed. 
- */ -int mmc_of_parse_voltage(struct device_node *np, u32 *mask) -{ - const u32 *voltage_ranges; - int num_ranges, i; - - voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); - num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; - if (!voltage_ranges) { - pr_debug("%pOF: voltage-ranges unspecified\n", np); - return 0; - } - if (!num_ranges) { - pr_err("%pOF: voltage-ranges empty\n", np); - return -EINVAL; - } - - for (i = 0; i < num_ranges; i++) { - const int j = i * 2; - u32 ocr_mask; - - ocr_mask = mmc_vddrange_to_ocrmask( - be32_to_cpu(voltage_ranges[j]), - be32_to_cpu(voltage_ranges[j + 1])); - if (!ocr_mask) { - pr_err("%pOF: voltage-range #%d is invalid\n", - np, i); - return -EINVAL; - } - *mask |= ocr_mask; - } - - return 1; -} -EXPORT_SYMBOL(mmc_of_parse_voltage); - -#endif /* CONFIG_OF */ static int mmc_of_get_func_num(struct device_node *node) { @@ -1190,246 +1114,6 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host, return NULL; } -#ifdef CONFIG_REGULATOR - -/** - * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage - * @vdd_bit: OCR bit number - * @min_uV: minimum voltage value (mV) - * @max_uV: maximum voltage value (mV) - * - * This function returns the voltage range according to the provided OCR - * bit number. If conversion is not possible a negative errno value returned. - */ -static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV) -{ - int tmp; - - if (!vdd_bit) - return -EINVAL; - - /* - * REVISIT mmc_vddrange_to_ocrmask() may have set some - * bits this regulator doesn't quite support ... don't - * be too picky, most cards and regulators are OK with - * a 0.1V range goof (it's a small error percentage). - */ - tmp = vdd_bit - ilog2(MMC_VDD_165_195); - if (tmp == 0) { - *min_uV = 1650 * 1000; - *max_uV = 1950 * 1000; - } else { - *min_uV = 1900 * 1000 + tmp * 100 * 1000; - *max_uV = *min_uV + 100 * 1000; - } - - return 0; -} - -/** - * mmc_regulator_get_ocrmask - return mask of supported voltages - * @supply: regulator to use - * - * This returns either a negative errno, or a mask of voltages that - * can be provided to MMC/SD/SDIO devices using the specified voltage - * regulator. This would normally be called before registering the - * MMC host adapter. - */ -int mmc_regulator_get_ocrmask(struct regulator *supply) -{ - int result = 0; - int count; - int i; - int vdd_uV; - int vdd_mV; - - count = regulator_count_voltages(supply); - if (count < 0) - return count; - - for (i = 0; i < count; i++) { - vdd_uV = regulator_list_voltage(supply, i); - if (vdd_uV <= 0) - continue; - - vdd_mV = vdd_uV / 1000; - result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); - } - - if (!result) { - vdd_uV = regulator_get_voltage(supply); - if (vdd_uV <= 0) - return vdd_uV; - - vdd_mV = vdd_uV / 1000; - result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); - } - - return result; -} -EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); - -/** - * mmc_regulator_set_ocr - set regulator to match host->ios voltage - * @mmc: the host to regulate - * @supply: regulator to use - * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) - * - * Returns zero on success, else negative errno. - * - * MMC host drivers may use this to enable or disable a regulator using - * a particular supply voltage. This would normally be called from the - * set_ios() method. 
- */ -int mmc_regulator_set_ocr(struct mmc_host *mmc, - struct regulator *supply, - unsigned short vdd_bit) -{ - int result = 0; - int min_uV, max_uV; - - if (vdd_bit) { - mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV); - - result = regulator_set_voltage(supply, min_uV, max_uV); - if (result == 0 && !mmc->regulator_enabled) { - result = regulator_enable(supply); - if (!result) - mmc->regulator_enabled = true; - } - } else if (mmc->regulator_enabled) { - result = regulator_disable(supply); - if (result == 0) - mmc->regulator_enabled = false; - } - - if (result) - dev_err(mmc_dev(mmc), - "could not set regulator OCR (%d)\n", result); - return result; -} -EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); - -static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator, - int min_uV, int target_uV, - int max_uV) -{ - /* - * Check if supported first to avoid errors since we may try several - * signal levels during power up and don't want to show errors. - */ - if (!regulator_is_supported_voltage(regulator, min_uV, max_uV)) - return -EINVAL; - - return regulator_set_voltage_triplet(regulator, min_uV, target_uV, - max_uV); -} - -/** - * mmc_regulator_set_vqmmc - Set VQMMC as per the ios - * - * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible. - * That will match the behavior of old boards where VQMMC and VMMC were supplied - * by the same supply. The Bus Operating conditions for 3.3V signaling in the - * SD card spec also define VQMMC in terms of VMMC. - * If this is not possible we'll try the full 2.7-3.6V of the spec. - * - * For 1.2V and 1.8V signaling we'll try to get as close as possible to the - * requested voltage. This is definitely a good idea for UHS where there's a - * separate regulator on the card that's trying to make 1.8V and it's best if - * we match. - * - * This function is expected to be used by a controller's - * start_signal_voltage_switch() function. - */ -int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) -{ - struct device *dev = mmc_dev(mmc); - int ret, volt, min_uV, max_uV; - - /* If no vqmmc supply then we can't change the voltage */ - if (IS_ERR(mmc->supply.vqmmc)) - return -EINVAL; - - switch (ios->signal_voltage) { - case MMC_SIGNAL_VOLTAGE_120: - return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - 1100000, 1200000, 1300000); - case MMC_SIGNAL_VOLTAGE_180: - return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - 1700000, 1800000, 1950000); - case MMC_SIGNAL_VOLTAGE_330: - ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV); - if (ret < 0) - return ret; - - dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n", - __func__, volt, max_uV); - - min_uV = max(volt - 300000, 2700000); - max_uV = min(max_uV + 200000, 3600000); - - /* - * Due to a limitation in the current implementation of - * regulator_set_voltage_triplet() which is taking the lowest - * voltage possible if below the target, search for a suitable - * voltage in two steps and try to stay close to vmmc - * with a 0.3V tolerance at first. - */ - if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - min_uV, volt, max_uV)) - return 0; - - return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - 2700000, volt, 3600000); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); - -#endif /* CONFIG_REGULATOR */ - -/** - * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host - * @mmc: the host to regulate - * - * Returns 0 or errno. 
errno should be handled, it is either a critical error - * or -EPROBE_DEFER. 0 means no critical error but it does not mean all - * regulators have been found because they all are optional. If you require - * certain regulators, you need to check separately in your driver if they got - * populated after calling this function. - */ -int mmc_regulator_get_supply(struct mmc_host *mmc) -{ - struct device *dev = mmc_dev(mmc); - int ret; - - mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); - mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); - - if (IS_ERR(mmc->supply.vmmc)) { - if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_dbg(dev, "No vmmc regulator found\n"); - } else { - ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); - if (ret > 0) - mmc->ocr_avail = ret; - else - dev_warn(dev, "Failed getting OCR mask: %d\n", ret); - } - - if (IS_ERR(mmc->supply.vqmmc)) { - if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_dbg(dev, "No vqmmc regulator found\n"); - } - - return 0; -} -EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); - /* * Mask off any voltages we don't support and select * the lowest voltage @@ -1936,6 +1620,12 @@ static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, { unsigned int erase_timeout; + /* for DISCARD none of the below calculation applies. + * the busy timeout is 250msec per discard command. + */ + if (arg == SD_DISCARD_ARG) + return SD_DISCARD_TIMEOUT_MS; + if (card->ssr.erase_timeout) { /* Erase timeout specified in SD Status Register (SSR) */ erase_timeout = card->ssr.erase_timeout * qty + @@ -2164,7 +1854,7 @@ static unsigned int mmc_align_erase_size(struct mmc_card *card, * @card: card to erase * @from: first sector to erase * @nr: number of sectors to erase - * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) + * @arg: erase command argument * * Caller must claim host before calling this function. 
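[Editor's note, not part of the patch: a minimal sketch of how the discard pieces above fit together. The discard/trim/erase argument is now chosen once per card and cached in card->erase_arg (MMC_DISCARD_ARG, MMC_TRIM_ARG or MMC_ERASE_ARG for eMMC; SD_DISCARD_ARG or SD_ERASE_ARG for SD), the block layer passes it straight to mmc_erase(), and for SD_DISCARD_ARG the erase timeout is the flat 250 ms SD_DISCARD_TIMEOUT_MS rather than the SSR/CSD-based estimate. foo_issue_discard() is a hypothetical helper used only for illustration.]

#include <linux/blkdev.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>

static int foo_issue_discard(struct mmc_card *card, struct request *req)
{
	unsigned int from = blk_rq_pos(req);
	unsigned int nr = blk_rq_sectors(req);

	/* card->erase_arg was set during card init (see mmc.c / sd.c hunks) */
	return mmc_erase(card, from, nr, card->erase_arg);
}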
*/ @@ -2181,14 +1871,14 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, if (!card->erase_size) return -EOPNOTSUPP; - if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) + if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG) return -EOPNOTSUPP; - if ((arg & MMC_SECURE_ARGS) && + if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) return -EOPNOTSUPP; - if ((arg & MMC_TRIM_ARGS) && + if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) return -EOPNOTSUPP; @@ -2381,9 +2071,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card) return card->pref_erase; max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); - if (max_discard && mmc_can_trim(card)) { + if (mmc_can_trim(card)) { max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); - if (max_trim < max_discard) + if (max_trim < max_discard || max_discard == 0) max_discard = max_trim; } else if (max_discard < card->erase_size) { max_discard = 0; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 8fb6bc37f808..b5083b13d594 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -59,6 +59,7 @@ void mmc_power_up(struct mmc_host *host, u32 ocr); void mmc_power_off(struct mmc_host *host); void mmc_power_cycle(struct mmc_host *host, u32 ocr); void mmc_set_initial_state(struct mmc_host *host); +u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); static inline void mmc_delay(unsigned int ms) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index cf58ccaf22d5..3a4402a79904 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -194,7 +194,7 @@ int mmc_of_parse(struct mmc_host *host) switch (bus_width) { case 8: host->caps |= MMC_CAP_8_BIT_DATA; - /* Hosts capable of 8-bit transfers can also do 4 bits */ + /* fall through - Hosts capable of 8-bit can also do 4 bits */ case 4: host->caps |= MMC_CAP_4_BIT_DATA; break; @@ -260,7 +260,7 @@ int mmc_of_parse(struct mmc_host *host) /* Parse Write Protection */ ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); + ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -349,6 +349,50 @@ int mmc_of_parse(struct mmc_host *host) EXPORT_SYMBOL(mmc_of_parse); /** + * mmc_of_parse_voltage - return mask of supported voltages + * @np: The device node need to be parsed. + * @mask: mask of voltages available for MMC/SD/SDIO + * + * Parse the "voltage-ranges" DT property, returning zero if it is not + * found, negative errno if the voltage-range specification is invalid, + * or one if the voltage-range is specified and successfully parsed. 
+ */ +int mmc_of_parse_voltage(struct device_node *np, u32 *mask) +{ + const u32 *voltage_ranges; + int num_ranges, i; + + voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); + num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; + if (!voltage_ranges) { + pr_debug("%pOF: voltage-ranges unspecified\n", np); + return 0; + } + if (!num_ranges) { + pr_err("%pOF: voltage-ranges empty\n", np); + return -EINVAL; + } + + for (i = 0; i < num_ranges; i++) { + const int j = i * 2; + u32 ocr_mask; + + ocr_mask = mmc_vddrange_to_ocrmask( + be32_to_cpu(voltage_ranges[j]), + be32_to_cpu(voltage_ranges[j + 1])); + if (!ocr_mask) { + pr_err("%pOF: voltage-range #%d is invalid\n", + np, i); + return -EINVAL; + } + *mask |= ocr_mask; + } + + return 1; +} +EXPORT_SYMBOL(mmc_of_parse_voltage); + +/** * mmc_alloc_host - initialise the per-host structure. * @extra: sizeof private data structure * @dev: pointer to host device model structure diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index da892a599524..3e786ba204c3 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1594,6 +1594,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (oldcard) { if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); err = -ENOENT; goto err; } @@ -1743,6 +1745,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; } + /* set erase_arg */ + if (mmc_can_discard(card)) + card->erase_arg = MMC_DISCARD_ARG; + else if (mmc_can_trim(card)) + card->erase_arg = MMC_TRIM_ARG; + else + card->erase_arg = MMC_ERASE_ARG; + /* * Select timing interface */ diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 9054329fe903..c5208fb312ae 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -562,7 +562,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (index == EXT_CSD_SANITIZE_START) cmd.sanitize_busy = true; - err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + err = mmc_wait_for_cmd(host, &cmd, 0); if (err) goto out; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 35cc138b096d..15a45ec6518d 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; + unsigned block_size = 512; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; @@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); + + if (mmc_card_mmc(card)) + block_size = card->ext_csd.data_sector_size; + + blk_queue_logical_block_size(mq->queue, block_size); + blk_queue_max_segment_size(mq->queue, + round_down(host->max_seg_size, block_size)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c new file mode 100644 index 000000000000..b6febbcf8978 --- /dev/null +++ b/drivers/mmc/core/regulator.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Helper functions for MMC regulators. 
+ */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/log2.h> +#include <linux/regulator/consumer.h> + +#include <linux/mmc/host.h> + +#include "core.h" +#include "host.h" + +#ifdef CONFIG_REGULATOR + +/** + * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage + * @vdd_bit: OCR bit number + * @min_uV: minimum voltage value (mV) + * @max_uV: maximum voltage value (mV) + * + * This function returns the voltage range according to the provided OCR + * bit number. If conversion is not possible a negative errno value returned. + */ +static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV) +{ + int tmp; + + if (!vdd_bit) + return -EINVAL; + + /* + * REVISIT mmc_vddrange_to_ocrmask() may have set some + * bits this regulator doesn't quite support ... don't + * be too picky, most cards and regulators are OK with + * a 0.1V range goof (it's a small error percentage). + */ + tmp = vdd_bit - ilog2(MMC_VDD_165_195); + if (tmp == 0) { + *min_uV = 1650 * 1000; + *max_uV = 1950 * 1000; + } else { + *min_uV = 1900 * 1000 + tmp * 100 * 1000; + *max_uV = *min_uV + 100 * 1000; + } + + return 0; +} + +/** + * mmc_regulator_get_ocrmask - return mask of supported voltages + * @supply: regulator to use + * + * This returns either a negative errno, or a mask of voltages that + * can be provided to MMC/SD/SDIO devices using the specified voltage + * regulator. This would normally be called before registering the + * MMC host adapter. + */ +static int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + int result = 0; + int count; + int i; + int vdd_uV; + int vdd_mV; + + count = regulator_count_voltages(supply); + if (count < 0) + return count; + + for (i = 0; i < count; i++) { + vdd_uV = regulator_list_voltage(supply, i); + if (vdd_uV <= 0) + continue; + + vdd_mV = vdd_uV / 1000; + result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); + } + + if (!result) { + vdd_uV = regulator_get_voltage(supply); + if (vdd_uV <= 0) + return vdd_uV; + + vdd_mV = vdd_uV / 1000; + result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); + } + + return result; +} + +/** + * mmc_regulator_set_ocr - set regulator to match host->ios voltage + * @mmc: the host to regulate + * @supply: regulator to use + * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) + * + * Returns zero on success, else negative errno. + * + * MMC host drivers may use this to enable or disable a regulator using + * a particular supply voltage. This would normally be called from the + * set_ios() method. + */ +int mmc_regulator_set_ocr(struct mmc_host *mmc, + struct regulator *supply, + unsigned short vdd_bit) +{ + int result = 0; + int min_uV, max_uV; + + if (vdd_bit) { + mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV); + + result = regulator_set_voltage(supply, min_uV, max_uV); + if (result == 0 && !mmc->regulator_enabled) { + result = regulator_enable(supply); + if (!result) + mmc->regulator_enabled = true; + } + } else if (mmc->regulator_enabled) { + result = regulator_disable(supply); + if (result == 0) + mmc->regulator_enabled = false; + } + + if (result) + dev_err(mmc_dev(mmc), + "could not set regulator OCR (%d)\n", result); + return result; +} +EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); + +static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator, + int min_uV, int target_uV, + int max_uV) +{ + /* + * Check if supported first to avoid errors since we may try several + * signal levels during power up and don't want to show errors. 
+ */ + if (!regulator_is_supported_voltage(regulator, min_uV, max_uV)) + return -EINVAL; + + return regulator_set_voltage_triplet(regulator, min_uV, target_uV, + max_uV); +} + +/** + * mmc_regulator_set_vqmmc - Set VQMMC as per the ios + * + * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible. + * That will match the behavior of old boards where VQMMC and VMMC were supplied + * by the same supply. The Bus Operating conditions for 3.3V signaling in the + * SD card spec also define VQMMC in terms of VMMC. + * If this is not possible we'll try the full 2.7-3.6V of the spec. + * + * For 1.2V and 1.8V signaling we'll try to get as close as possible to the + * requested voltage. This is definitely a good idea for UHS where there's a + * separate regulator on the card that's trying to make 1.8V and it's best if + * we match. + * + * This function is expected to be used by a controller's + * start_signal_voltage_switch() function. + */ +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + int ret, volt, min_uV, max_uV; + + /* If no vqmmc supply then we can't change the voltage */ + if (IS_ERR(mmc->supply.vqmmc)) + return -EINVAL; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_120: + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 1100000, 1200000, 1300000); + case MMC_SIGNAL_VOLTAGE_180: + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 1700000, 1800000, 1950000); + case MMC_SIGNAL_VOLTAGE_330: + ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV); + if (ret < 0) + return ret; + + dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n", + __func__, volt, max_uV); + + min_uV = max(volt - 300000, 2700000); + max_uV = min(max_uV + 200000, 3600000); + + /* + * Due to a limitation in the current implementation of + * regulator_set_voltage_triplet() which is taking the lowest + * voltage possible if below the target, search for a suitable + * voltage in two steps and try to stay close to vmmc + * with a 0.3V tolerance at first. + */ + if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + min_uV, volt, max_uV)) + return 0; + + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 2700000, volt, 3600000); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); + +#else + +static inline int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + return 0; +} + +#endif /* CONFIG_REGULATOR */ + +/** + * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host + * @mmc: the host to regulate + * + * Returns 0 or errno. errno should be handled, it is either a critical error + * or -EPROBE_DEFER. 0 means no critical error but it does not mean all + * regulators have been found because they all are optional. If you require + * certain regulators, you need to check separately in your driver if they got + * populated after calling this function. 
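[Editor's note, not part of the patch: a minimal usage sketch of the regulator helpers kept in the new drivers/mmc/core/regulator.c, mirroring the jz4740 conversion later in this series. A host driver calls mmc_regulator_get_supply() once at probe time and then drives card power from its ->set_ios() callback via mmc_regulator_set_ocr(), passing 0 as the vdd bit to power off. foo_mmc_set_ios() is a hypothetical callback name.]

#include <linux/err.h>
#include <linux/mmc/host.h>

static void foo_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* vmmc was populated (if present) by mmc_regulator_get_supply() */
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;
	default:
		break;
	}
}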
+ */ +int mmc_regulator_get_supply(struct mmc_host *mmc) +{ + struct device *dev = mmc_dev(mmc); + int ret; + + mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); + mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); + + if (IS_ERR(mmc->supply.vmmc)) { + if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(dev, "No vmmc regulator found\n"); + } else { + ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); + if (ret > 0) + mmc->ocr_avail = ret; + else + dev_warn(dev, "Failed getting OCR mask: %d\n", ret); + } + + if (IS_ERR(mmc->supply.vqmmc)) { + if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(dev, "No vqmmc regulator found\n"); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index d0d9f90e7cdf..265e1aeeb9d8 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -209,6 +209,11 @@ static int mmc_decode_scr(struct mmc_card *card) /* Check if Physical Layer Spec v3.0 is supported */ scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1); + if (scr->sda_spec3) { + scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1); + scr->sda_specx = UNSTUFF_BITS(resp, 38, 4); + } + if (UNSTUFF_BITS(resp, 55, 1)) card->erased_byte = 0xFF; else @@ -226,6 +231,8 @@ static int mmc_read_ssr(struct mmc_card *card) { unsigned int au, es, et, eo; __be32 *raw_ssr; + u32 resp[4] = {}; + u8 discard_support; int i; if (!(card->csd.cmdclass & CCC_APP_SPEC)) { @@ -271,6 +278,14 @@ static int mmc_read_ssr(struct mmc_card *card) } } + /* + * starting SD5.1 discard is supported if DISCARD_SUPPORT (b313) is set + */ + resp[3] = card->raw_ssr[6]; + discard_support = UNSTUFF_BITS(resp, 313 - 288, 1); + card->erase_arg = (card->scr.sda_specx && discard_support) ? + SD_DISCARD_ARG : SD_ERASE_ARG; + return 0; } @@ -936,8 +951,11 @@ retry: return err; if (oldcard) { - if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) + if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; + } card = oldcard; } else { diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 47056d8d1bac..0bb0b8419016 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -52,36 +52,17 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) } EXPORT_SYMBOL_GPL(mmc_app_cmd); -/** - * mmc_wait_for_app_cmd - start an application command and wait for - completion - * @host: MMC host to start command - * @card: Card to send MMC_APP_CMD to - * @cmd: MMC command to start - * @retries: maximum number of retries - * - * Sends a MMC_APP_CMD, checks the card response, sends the command - * in the parameter and waits for it to complete. Return any error - * that occurred while the command was executing. Do not attempt to - * parse the response. - */ -int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, - struct mmc_command *cmd, int retries) +static int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, + struct mmc_command *cmd) { struct mmc_request mrq = {}; - - int i, err; - - if (retries < 0) - retries = MMC_CMD_RETRIES; - - err = -EIO; + int i, err = -EIO; /* * We have to resend MMC_APP_CMD for each attempt so * we cannot use the retries field in mmc_command. 
*/ - for (i = 0;i <= retries;i++) { + for (i = 0; i <= MMC_CMD_RETRIES; i++) { err = mmc_app_cmd(host, card); if (err) { /* no point in retrying; no APP commands allowed */ @@ -116,8 +97,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, return err; } -EXPORT_SYMBOL(mmc_wait_for_app_cmd); - int mmc_app_set_bus_width(struct mmc_card *card, int width) { struct mmc_command cmd = {}; @@ -136,7 +115,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width) return -EINVAL; } - return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES); + return mmc_wait_for_app_cmd(card->host, card, &cmd); } int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) @@ -152,7 +131,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; for (i = 100; i; i--) { - err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES); + err = mmc_wait_for_app_cmd(host, NULL, &cmd); if (err) break; diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h index 0e6c3d51e66d..ffaed5cacc88 100644 --- a/drivers/mmc/core/sd_ops.h +++ b/drivers/mmc/core/sd_ops.h @@ -16,7 +16,6 @@ struct mmc_card; struct mmc_host; -struct mmc_command; int mmc_app_set_bus_width(struct mmc_card *card, int width); int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); @@ -27,8 +26,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp); int mmc_app_sd_status(struct mmc_card *card, void *ssr); int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card); -int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, - struct mmc_command *cmd, int retries); #endif diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index d8e17ea6126d..6718fc8bb40f 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -617,6 +617,8 @@ try_again: if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) { mmc_remove_card(card); + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; } } else { @@ -624,6 +626,8 @@ try_again: if (oldcard && oldcard->type != MMC_TYPE_SDIO) { mmc_remove_card(card); + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; } } @@ -736,8 +740,11 @@ try_again: int same = (card->cis.vendor == oldcard->cis.vendor && card->cis.device == oldcard->cis.device); mmc_remove_card(card); - if (!same) + if (!same) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; + } card = oldcard; } diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index b6d8203e46eb..62b0f5ecc7f7 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -179,7 +179,6 @@ static int sdio_bus_remove(struct device *dev) { struct sdio_driver *drv = to_sdio_driver(dev->driver); struct sdio_func *func = dev_to_sdio_func(dev); - int ret = 0; /* Make sure card is powered before invoking ->remove() */ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) @@ -205,7 +204,7 @@ static int sdio_bus_remove(struct device *dev) dev_pm_domain_detach(dev, false); - return ret; + return 0; } static const struct dev_pm_ops sdio_bus_pm_ops = { diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index d40744bbafa9..3f67fbbe0d75 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -10,6 +10,7 @@ */ #include <linux/export.h> +#include <linux/kernel.h> 
#include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio.h> @@ -203,6 +204,21 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func) return min(mval, 512u); /* maximum size for byte mode */ } +/* + * This is legacy code, which needs to be re-worked some day. Basically we need + * to take into account the properties of the host, as to enable the SDIO func + * driver layer to allocate optimal buffers. + */ +static inline unsigned int _sdio_align_size(unsigned int sz) +{ + /* + * FIXME: We don't have a system for the controller to tell + * the core about its problems yet, so for now we just 32-bit + * align the size. + */ + return ALIGN(sz, 4); +} + /** * sdio_align_size - pads a transfer size to a more optimal value * @func: SDIO function @@ -230,7 +246,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) * wants to increase the size up to a point where it * might need more than one block. */ - sz = mmc_align_data_size(func->card, sz); + sz = _sdio_align_size(sz); /* * If we can still do this with just a byte transfer, then @@ -252,7 +268,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) */ blk_sz = ((sz + func->cur_blksize - 1) / func->cur_blksize) * func->cur_blksize; - blk_sz = mmc_align_data_size(func->card, blk_sz); + blk_sz = _sdio_align_size(blk_sz); /* * This value is only good if it is still just @@ -265,8 +281,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) * We failed to do one request, but at least try to * pad the remainder properly. */ - byte_sz = mmc_align_data_size(func->card, - sz % func->cur_blksize); + byte_sz = _sdio_align_size(sz % func->cur_blksize); if (byte_sz <= sdio_max_byte_size(func)) { blk_sz = sz / func->cur_blksize; return blk_sz * func->cur_blksize + byte_sz; @@ -276,16 +291,14 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) * We need multiple requests, so first check that the * controller can handle the chunk size; */ - chunk_sz = mmc_align_data_size(func->card, - sdio_max_byte_size(func)); + chunk_sz = _sdio_align_size(sdio_max_byte_size(func)); if (chunk_sz == sdio_max_byte_size(func)) { /* * Fix up the size of the remainder (if any) */ byte_sz = orig_sz % chunk_sz; if (byte_sz) { - byte_sz = mmc_align_data_size(func->card, - byte_sz); + byte_sz = _sdio_align_size(byte_sz); } return (orig_sz / chunk_sz) * chunk_sz + byte_sz; diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h index 96945cafbf0b..1f6d0447ea0f 100644 --- a/drivers/mmc/core/sdio_ops.h +++ b/drivers/mmc/core/sdio_ops.h @@ -25,7 +25,6 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn, int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz); int sdio_reset(struct mmc_host *host); -unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz); void sdio_irq_work(struct work_struct *work); static inline bool sdio_is_io_busy(u32 opcode, u32 arg) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 319ccd93383d..4afc6b87b465 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -22,7 +22,6 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - bool override_ro_active_level; bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; @@ -71,10 +70,6 @@ int mmc_gpio_get_ro(struct mmc_host *host) if (!ctx || 
!ctx->ro_gpio) return -ENOSYS; - if (ctx->override_ro_active_level) - return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^ - !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); - return gpiod_get_value_cansleep(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); @@ -225,7 +220,6 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer - * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds * @gpio_invert: will return whether the GPIO line is inverted or not, * set to NULL to ignore @@ -233,7 +227,7 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, bool override_active_level, + unsigned int idx, unsigned int debounce, bool *gpio_invert) { struct mmc_gpio *ctx = host->slot.handler_priv; @@ -253,7 +247,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, if (gpio_invert) *gpio_invert = !gpiod_is_active_low(desc); - ctx->override_ro_active_level = override_active_level; ctx->ro_gpio = desc; return 0; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index a44ec8bb5418..28fcd8f580a1 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -224,6 +224,7 @@ config MMC_SDHCI_ESDHC_IMX depends on ARCH_MXC depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Freescale eSDHC/uSDHC controller support found on i.MX25, i.MX35 i.MX5x and i.MX6x. @@ -250,6 +251,7 @@ config MMC_SDHCI_TEGRA depends on ARCH_TEGRA depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Tegra SD/MMC controller. If you have a Tegra platform with SD or MMC devices, say Y or M here. diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 47189f9ed4e2..735aa5871358 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1410,6 +1410,9 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_BUS_WIDTH_4: slot->sdc_reg |= ATMCI_SDCBUS_4BIT; break; + case MMC_BUS_WIDTH_8: + slot->sdc_reg |= ATMCI_SDCBUS_8BIT; + break; } if (ios->clock) { @@ -2275,8 +2278,11 @@ static int atmci_init_slot(struct atmel_mci *host, * use only one bit for data to prevent fifo underruns and overruns * which will corrupt data. 
*/ - if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) + if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) { mmc->caps |= MMC_CAP_4_BIT_DATA; + if (slot_data->bus_width >= 8) + mmc->caps |= MMC_CAP_8_BIT_DATA; + } if (atmci_get_version(host) < 0x200) { mmc->max_segs = 256; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index c9e7aa50bb0a..7e0d3a49c06d 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -148,7 +148,6 @@ struct bcm2835_host { void __iomem *ioaddr; u32 phys_addr; - struct mmc_host *mmc; struct platform_device *pdev; int clock; /* Current clock speed */ @@ -618,7 +617,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host) "failed to terminate DMA (%d)\n", err); } - mmc_request_done(host->mmc, mrq); + mmc_request_done(mmc_from_priv(host), mrq); } static @@ -837,7 +836,7 @@ static void bcm2835_timeout(struct work_struct *work) dev_err(dev, "timeout waiting for hardware interrupt.\n"); bcm2835_dumpregs(host); - bcm2835_reset(host->mmc); + bcm2835_reset(mmc_from_priv(host)); if (host->data) { host->data->error = -ETIMEDOUT; @@ -1100,6 +1099,7 @@ static void bcm2835_dma_complete_work(struct work_struct *work) static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock) { + struct mmc_host *mmc = mmc_from_priv(host); int div; /* The SDCDIV register has 11 bits, and holds (div - 2). But @@ -1143,18 +1143,18 @@ static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock) div = SDCDIV_MAX_CDIV; clock = host->max_clk / (div + 2); - host->mmc->actual_clock = clock; + mmc->actual_clock = clock; /* Calibrate some delays */ host->ns_per_fifo_word = (1000000000 / clock) * - ((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32); + ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32); host->cdiv = div; writel(host->cdiv, host->ioaddr + SDCDIV); /* Set the timeout to 500ms */ - writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT); + writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT); } static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -1264,7 +1264,7 @@ static const struct mmc_host_ops bcm2835_ops = { static int bcm2835_add_host(struct bcm2835_host *host) { - struct mmc_host *mmc = host->mmc; + struct mmc_host *mmc = mmc_from_priv(host); struct device *dev = &host->pdev->dev; char pio_limit_string[20]; int ret; @@ -1286,7 +1286,7 @@ static int bcm2835_add_host(struct bcm2835_host *host) spin_lock_init(&host->lock); mutex_init(&host->mutex); - if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) { + if (!host->dma_chan_rxtx) { dev_warn(dev, "unable to initialise DMA channel. 
Falling back to PIO\n"); host->use_dma = false; } else { @@ -1370,7 +1370,6 @@ static int bcm2835_probe(struct platform_device *pdev) mmc->ops = &bcm2835_ops; host = mmc_priv(mmc); - host->mmc = mmc; host->pdev = pdev; spin_lock_init(&host->lock); @@ -1441,8 +1440,9 @@ err: static int bcm2835_remove(struct platform_device *pdev) { struct bcm2835_host *host = platform_get_drvdata(pdev); + struct mmc_host *mmc = mmc_from_priv(host); - mmc_remove_host(host->mmc); + mmc_remove_host(mmc); writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD); @@ -1454,8 +1454,7 @@ static int bcm2835_remove(struct platform_device *pdev) if (host->dma_chan_rxtx) dma_release_channel(host->dma_chan_rxtx); - mmc_free_host(host->mmc); - platform_set_drvdata(pdev, NULL); + mmc_free_host(mmc); return 0; } diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 1087b4c79cd6..4c477dcd2ada 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -566,30 +566,32 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) cb710_mmc_select_clock_divider(mmc, ios->clock); - if (ios->power_mode != reader->last_power_mode) - switch (ios->power_mode) { - case MMC_POWER_ON: - err = cb710_mmc_powerup(slot); - if (err) { - dev_warn(cb710_slot_dev(slot), - "powerup failed (%d)- retrying\n", err); - cb710_mmc_powerdown(slot); - udelay(1); + if (ios->power_mode != reader->last_power_mode) { + switch (ios->power_mode) { + case MMC_POWER_ON: err = cb710_mmc_powerup(slot); - if (err) + if (err) { dev_warn(cb710_slot_dev(slot), - "powerup retry failed (%d) - expect errors\n", + "powerup failed (%d)- retrying\n", err); + cb710_mmc_powerdown(slot); + udelay(1); + err = cb710_mmc_powerup(slot); + if (err) + dev_warn(cb710_slot_dev(slot), + "powerup retry failed (%d) - expect errors\n", err); + } + reader->last_power_mode = MMC_POWER_ON; + break; + case MMC_POWER_OFF: + cb710_mmc_powerdown(slot); + reader->last_power_mode = MMC_POWER_OFF; + break; + case MMC_POWER_UP: + default: + /* ignore */ + break; } - reader->last_power_mode = MMC_POWER_ON; - break; - case MMC_POWER_OFF: - cb710_mmc_powerdown(slot); - reader->last_power_mode = MMC_POWER_OFF; - break; - case MMC_POWER_UP: - default: - /* ignore */; } cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1); diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 159270e947cf..a8af682a9182 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * - (cq_host->num_slots - 1); + cq_host->mmc->cqe_qdepth; pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, @@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size, &cq_host->desc_dma_base, GFP_KERNEL); + if (!cq_host->desc_base) + return -ENOMEM; + cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), cq_host->data_size, &cq_host->trans_desc_dma_base, GFP_KERNEL); - if (!cq_host->desc_base || !cq_host->trans_desc_base) + if (!cq_host->trans_desc_base) { + dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + cq_host->desc_base = NULL; + cq_host->desc_dma_base = 0; return -ENOMEM; + } pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 
0x%llx trans_dma: 0x%llx\n", mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 9e68c3645e22..49e0daf2ef5e 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1193,7 +1193,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc) else if (ret) mmc->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 33215d66afa2..63303022669c 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -21,7 +21,6 @@ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> @@ -36,7 +35,6 @@ #include <asm/cacheflush.h> #include <asm/mach-jz4740/dma.h> -#include <asm/mach-jz4740/jz4740_mmc.h> #define JZ_REG_MMC_STRPCL 0x00 #define JZ_REG_MMC_STATUS 0x04 @@ -148,9 +146,7 @@ enum jz4780_cookie { struct jz4740_mmc_host { struct mmc_host *mmc; struct platform_device *pdev; - struct jz4740_mmc_platform_data *pdata; struct clk *clk; - struct gpio_desc *power; enum jz4740_mmc_version version; @@ -743,6 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; jz_mmc_prepare_data_transfer(host); + /* fall through */ case JZ4740_MMC_STATE_TRANSFER_DATA: if (host->use_dma) { @@ -777,6 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE); + /* fall through */ case JZ4740_MMC_STATE_SEND_STOP: if (!req->stop) @@ -894,16 +892,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_UP: jz4740_mmc_reset(host); - if (host->power) - gpiod_set_value(host->power, 1); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); host->cmdat |= JZ_MMC_CMDAT_INIT; clk_prepare_enable(host->clk); break; case MMC_POWER_ON: break; default: - if (host->power) - gpiod_set_value(host->power, 0); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); clk_disable_unprepare(host->clk); break; } @@ -936,38 +934,6 @@ static const struct mmc_host_ops jz4740_mmc_ops = { .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, }; -static int jz4740_mmc_request_gpios(struct jz4740_mmc_host *host, - struct mmc_host *mmc, - struct platform_device *pdev) -{ - struct jz4740_mmc_platform_data *pdata = dev_get_platdata(&pdev->dev); - int ret = 0; - - if (!pdata) - return 0; - - if (!pdata->card_detect_active_low) - mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; - if (!pdata->read_only_active_low) - mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - - /* - * Get optional card detect and write protect GPIOs, - * only back out on probe deferral. 
- */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); - if (ret == -EPROBE_DEFER) - return ret; - - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); - if (ret == -EPROBE_DEFER) - return ret; - - host->power = devm_gpiod_get_optional(&pdev->dev, "power", - GPIOD_OUT_HIGH); - return PTR_ERR_OR_ZERO(host->power); -} - static const struct of_device_id jz4740_mmc_of_match[] = { { .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 }, { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B }, @@ -982,9 +948,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev) struct mmc_host *mmc; struct jz4740_mmc_host *host; const struct of_device_id *match; - struct jz4740_mmc_platform_data *pdata; - - pdata = dev_get_platdata(&pdev->dev); mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); if (!mmc) { @@ -993,29 +956,25 @@ static int jz4740_mmc_probe(struct platform_device* pdev) } host = mmc_priv(mmc); - host->pdata = pdata; match = of_match_device(jz4740_mmc_of_match, &pdev->dev); if (match) { host->version = (enum jz4740_mmc_version)match->data; - ret = mmc_of_parse(mmc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "could not parse of data: %d\n", ret); - goto err_free_host; - } } else { /* JZ4740 should be the only one using legacy probe */ host->version = JZ_MMC_JZ4740; - mmc->caps |= MMC_CAP_SDIO_IRQ; - if (!(pdata && pdata->data_1bit)) - mmc->caps |= MMC_CAP_4_BIT_DATA; - ret = jz4740_mmc_request_gpios(host, mmc, pdev); - if (ret) - goto err_free_host; } + ret = mmc_of_parse(mmc); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "could not parse device properties: %d\n", ret); + goto err_free_host; + } + + mmc_regulator_get_supply(mmc); + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = host->irq; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 10ba46b728e8..1b1498805972 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1450,9 +1450,10 @@ static int mmc_spi_probe(struct spi_device *spi) mmc->caps &= ~MMC_CAP_NEEDS_POLL; mmc_gpiod_request_cd_irq(mmc); } + mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ - status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL); + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index e352f5ad5801..387ff14587b8 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1127,6 +1127,12 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) writel(c, base + MMCICOMMAND); } +static void mmci_stop_command(struct mmci_host *host) +{ + host->stop_abort.error = 0; + mmci_start_command(host, &host->stop_abort, 0); +} + static void mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) @@ -1196,10 +1202,16 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, /* The error clause is handled above, success! 
*/ data->bytes_xfered = data->blksz * data->blocks; - if (!data->stop || (host->mrq->sbc && !data->error)) + if (!data->stop) { + if (host->variant->cmdreg_stop && data->error) + mmci_stop_command(host); + else + mmci_request_end(host, data->mrq); + } else if (host->mrq->sbc && !data->error) { mmci_request_end(host, data->mrq); - else + } else { mmci_start_command(host, data->stop, 0); + } } } @@ -1298,6 +1310,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, mmci_dma_error(host); mmci_stop_data(host); + if (host->variant->cmdreg_stop && cmd->error) { + mmci_stop_command(host); + return; + } } mmci_request_end(host, host->mrq); } else if (sbc) { @@ -1956,6 +1972,11 @@ static int mmci_probe(struct amba_device *dev, mmc->max_busy_timeout = 0; } + /* Prepare a CMD12 - needed to clear the DPSM on some variants. */ + host->stop_abort.opcode = MMC_STOP_TRANSMISSION; + host->stop_abort.arg = 0; + host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC; + mmc->ops = &mmci_ops; /* We support these PM capabilities. */ @@ -2011,7 +2032,7 @@ static int mmci_probe(struct amba_device *dev, if (ret == -EPROBE_DEFER) goto clk_disable; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret == -EPROBE_DEFER) goto clk_disable; } diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 24229097d05c..14df81054438 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -377,6 +377,7 @@ struct mmci_host { void __iomem *base; struct mmc_request *mrq; struct mmc_command *cmd; + struct mmc_command stop_abort; struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 4d17032d15ee..d54612257b06 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -31,14 +31,12 @@ #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/dmaengine.h> #include <linux/types.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> -#include <linux/of_gpio.h> #include <linux/mmc/slot-gpio.h> #include <asm/dma.h> diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index add1e70195ea..4f06fb03c0a2 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -25,7 +25,6 @@ #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -39,7 +38,6 @@ #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <linux/mmc/slot-gpio.h> -#include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #include <linux/stmp_device.h> diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index b294b221f225..8a274b91804e 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -61,9 +61,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms; - const __be32 *voltage_ranges; - int num_ranges; - int i; if (dev->platform_data || !np) return dev->platform_data; @@ -72,25 +69,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) if (!oms) return NULL; - voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); - num_ranges = num_ranges / sizeof(*voltage_ranges) / 
2; - if (!voltage_ranges || !num_ranges) { - dev_err(dev, "OF: voltage-ranges unspecified\n"); + if (mmc_of_parse_voltage(np, &oms->pdata.ocr_mask) <= 0) goto err_ocr; - } - - for (i = 0; i < num_ranges; i++) { - const int j = i * 2; - u32 mask; - - mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]), - be32_to_cpu(voltage_ranges[j + 1])); - if (!mask) { - dev_err(dev, "OF: voltage-range #%d is invalid\n", i); - goto err_ocr; - } - oms->pdata.ocr_mask |= mask; - } oms->detect_irq = irq_of_parse_and_map(np, 0); if (oms->detect_irq != 0) { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index c60a7625b1fa..b2873a2432b6 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques reg &= ~(1 << 5); OMAP_MMC_WRITE(host, SDIO, reg); /* Set maximum timeout */ - OMAP_MMC_WRITE(host, CTO, 0xff); + OMAP_MMC_WRITE(host, CTO, 0xfd); } static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req) diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 8779bbaa6b69..c907bf502a12 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -743,7 +743,7 @@ static int pxamci_probe(struct platform_device *pdev) goto out; } - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index da1e49c45bec..8394a7bb1fc1 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -15,6 +15,7 @@ struct renesas_sdhi_scc { unsigned long clk_rate; /* clock rate for SDR104 */ u32 tap; /* sampling clock position for SDR104 */ + u32 tap_hs400; /* sampling clock position for HS400 */ }; struct renesas_sdhi_of_data { @@ -49,6 +50,7 @@ struct renesas_sdhi { struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; u32 scc_tappos; + u32 scc_tappos_hs400; }; #define host_to_priv(host) \ diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 31a351a20dc0..71e13844df6c 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -337,6 +337,10 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) /* Set HS400 mode */ sd_ctrl_write16(host, CTL_SDIF_MODE, 0x0001 | sd_ctrl_read16(host, CTL_SDIF_MODE)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, + priv->scc_tappos_hs400); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, (SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | @@ -396,6 +400,9 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host, /* Reset HS400 mode */ sd_ctrl_write16(host, CTL_SDIF_MODE, ~0x0001 & sd_ctrl_read16(host, CTL_SDIF_MODE)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, ~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) & @@ -723,6 +730,13 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->ops.start_signal_voltage_switch = renesas_sdhi_start_signal_voltage_switch; host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27; + + /* SDR and HS200/400 registers requires HW reset */ + if (of_data && of_data->scc_offset) { + priv->scc_ctl = host->ctl + of_data->scc_offset; + host->mmc->caps |= 
MMC_CAP_HW_RESET; + host->hw_reset = renesas_sdhi_hw_reset; + } } /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */ @@ -775,12 +789,11 @@ int renesas_sdhi_probe(struct platform_device *pdev, const struct renesas_sdhi_scc *taps = of_data->taps; bool hit = false; - host->mmc->caps |= MMC_CAP_HW_RESET; - for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { priv->scc_tappos = taps->tap; + priv->scc_tappos_hs400 = taps->tap_hs400; hit = true; break; } @@ -789,12 +802,10 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!hit) dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); - priv->scc_ctl = host->ctl + of_data->scc_offset; host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; host->select_tuning = renesas_sdhi_select_tuning; host->check_scc_error = renesas_sdhi_check_scc_error; - host->hw_reset = renesas_sdhi_hw_reset; host->prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; host->hs400_downgrade = renesas_sdhi_disable_scc; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 92c9b15252da..9dfafa2a90a3 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -81,6 +81,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { .clk_rate = 0, .tap = 0x00000300, + .tap_hs400 = 0x00000704, }, }; diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 8471160316e0..02cd878e209f 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { .scc_offset = 0x0300, .taps = rcar_gen2_scc_taps, .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), + .max_blk_count = 0xffffffff, }; /* Definitions for sampling clocks */ diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 10f5219b3b40..f31333e831a7 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1530,7 +1530,7 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host) return ret; } - ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for WP %d\n", ret); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index bdbd4897c0f7..a6c2bd202b45 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -18,12 +18,10 @@ #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/mmc/slot-gpio.h> #include "sdhci-pltfm.h" diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 552bddc5096c..1cd10356fc14 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -55,7 +55,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) } sdhci_get_of_property(pdev); - mmc_of_parse(host->mmc); + res = mmc_of_parse(host->mmc); + if (res) + goto err; /* * Supply the existing CAPS, but clear the UHS modes. 
This diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d0d319398a54..8dbbc1f62b70 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -25,6 +25,7 @@ #include <linux/pm_runtime.h> #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" +#include "cqhci.h" #define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f #define ESDHC_CTRL_D3CD 0x08 @@ -50,6 +51,7 @@ #define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24) #define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) #define ESDHC_MIX_CTRL_HS400_EN (1 << 26) +#define ESDHC_MIX_CTRL_HS400_ES_EN (1 << 27) /* Bits 3 and 6 are not SDHCI standard definitions */ #define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 /* Tuning bits */ @@ -76,6 +78,9 @@ #define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1) #define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1 +#define ESDHC_VEND_SPEC2 0xc8 +#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8) + #define ESDHC_TUNING_CTRL 0xcc #define ESDHC_STD_TUNING_EN (1 << 24) /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ @@ -103,6 +108,9 @@ */ #define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28) +/* the address offset of CQHCI */ +#define ESDHC_CQHCI_ADDR_OFFSET 0x100 + /* * The CMDTYPE of the CMD register (offset 0xE) should be set to * "11" when the STOP CMD12 is issued on imx53 to abort one @@ -138,51 +146,71 @@ #define ESDHC_FLAG_HS200 BIT(8) /* The IP supports HS400 mode */ #define ESDHC_FLAG_HS400 BIT(9) - -/* A clock frequency higher than this rate requires strobe dll control */ -#define ESDHC_STROBE_DLL_CLK_FREQ 100000000 +/* + * The IP has errata ERR010450 + * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't + * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz. + */ +#define ESDHC_FLAG_ERR010450 BIT(10) +/* The IP supports HS400ES mode */ +#define ESDHC_FLAG_HS400_ES BIT(11) +/* The IP has Host Controller Interface for Command Queuing */ +#define ESDHC_FLAG_CQHCI BIT(12) struct esdhc_soc_data { u32 flags; }; -static struct esdhc_soc_data esdhc_imx25_data = { +static const struct esdhc_soc_data esdhc_imx25_data = { .flags = ESDHC_FLAG_ERR004536, }; -static struct esdhc_soc_data esdhc_imx35_data = { +static const struct esdhc_soc_data esdhc_imx35_data = { .flags = ESDHC_FLAG_ERR004536, }; -static struct esdhc_soc_data esdhc_imx51_data = { +static const struct esdhc_soc_data esdhc_imx51_data = { .flags = 0, }; -static struct esdhc_soc_data esdhc_imx53_data = { +static const struct esdhc_soc_data esdhc_imx53_data = { .flags = ESDHC_FLAG_MULTIBLK_NO_INT, }; -static struct esdhc_soc_data usdhc_imx6q_data = { +static const struct esdhc_soc_data usdhc_imx6q_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, }; -static struct esdhc_soc_data usdhc_imx6sl_data = { +static const struct esdhc_soc_data usdhc_imx6sl_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536 | ESDHC_FLAG_HS200, }; -static struct esdhc_soc_data usdhc_imx6sx_data = { +static const struct esdhc_soc_data usdhc_imx6sx_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200, }; -static struct esdhc_soc_data usdhc_imx7d_data = { +static const struct esdhc_soc_data usdhc_imx6ull_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_ERR010450, +}; + +static const struct esdhc_soc_data usdhc_imx7d_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400, }; +static struct 
esdhc_soc_data usdhc_imx8qxp_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES + | ESDHC_FLAG_CQHCI, +}; + struct pltfm_imx_data { u32 scratchpad; struct pinctrl *pinctrl; @@ -227,7 +255,9 @@ static const struct of_device_id imx_esdhc_dt_ids[] = { { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, }, { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, + { .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, }, { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, }, + { .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); @@ -733,6 +763,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) { + unsigned int max_clock; + + max_clock = imx_data->is_ddr ? 45000000 : 150000000; + + clock = min(clock, max_clock); + } + while (host_clock / (16 * pre_div * ddr_pre_div) > clock && pre_div < 256) pre_div *= 2; @@ -801,6 +839,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) SDHCI_HOST_CONTROL); } +static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* + * i.MX uSDHC internally already uses a fixed optimized timing for + * DDR50, normally does not require tuning for DDR50 mode. + */ + if (host->timing == MMC_TIMING_UHS_DDR50) + return 0; + + return sdhci_execute_tuning(mmc, opcode); +} + static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) { u32 reg; @@ -864,6 +916,19 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) return ret; } +static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 m; + + m = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (ios->enhanced_strobe) + m |= ESDHC_MIX_CTRL_HS400_ES_EN; + else + m &= ~ESDHC_MIX_CTRL_HS400_ES_EN; + writel(m, host->ioaddr + ESDHC_MIX_CTRL); +} + static int esdhc_change_pinstate(struct sdhci_host *host, unsigned int uhs) { @@ -905,39 +970,35 @@ static int esdhc_change_pinstate(struct sdhci_host *host, * edge of data_strobe line. Due to the time delay between CLK line and * data_strobe line, if the delay time is larger than one clock cycle, * then CLK and data_strobe line will be misaligned, read error shows up. - * So when the CLK is higher than 100MHz, each clock cycle is short enough, - * host should configure the delay target. 
*/ static void esdhc_set_strobe_dll(struct sdhci_host *host) { u32 v; - if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) { - /* disable clock before enabling strobe dll */ - writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & - ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, - host->ioaddr + ESDHC_VENDOR_SPEC); + /* disable clock before enabling strobe dll */ + writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & + ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); - /* force a reset on strobe dll */ - writel(ESDHC_STROBE_DLL_CTRL_RESET, - host->ioaddr + ESDHC_STROBE_DLL_CTRL); - /* - * enable strobe dll ctrl and adjust the delay target - * for the uSDHC loopback read clock - */ - v = ESDHC_STROBE_DLL_CTRL_ENABLE | - (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); - writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); - /* wait 1us to make sure strobe dll status register stable */ - udelay(1); - v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS); - if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK)) - dev_warn(mmc_dev(host->mmc), - "warning! HS400 strobe DLL status REF not lock!\n"); - if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK)) - dev_warn(mmc_dev(host->mmc), - "warning! HS400 strobe DLL status SLV not lock!\n"); - } + /* force a reset on strobe dll */ + writel(ESDHC_STROBE_DLL_CTRL_RESET, + host->ioaddr + ESDHC_STROBE_DLL_CTRL); + /* + * enable strobe dll ctrl and adjust the delay target + * for the uSDHC loopback read clock + */ + v = ESDHC_STROBE_DLL_CTRL_ENABLE | + (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); + writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); + /* wait 1us to make sure strobe dll status register stable */ + udelay(1); + v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS); + if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK)) + dev_warn(mmc_dev(host->mmc), + "warning! HS400 strobe DLL status REF not lock!\n"); + if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK)) + dev_warn(mmc_dev(host->mmc), + "warning! 
HS400 strobe DLL status SLV not lock!\n"); } static void esdhc_reset_tuning(struct sdhci_host *host) @@ -979,6 +1040,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) case MMC_TIMING_UHS_SDR25: case MMC_TIMING_UHS_SDR50: case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS: case MMC_TIMING_MMC_HS200: writel(m, host->ioaddr + ESDHC_MIX_CTRL); break; @@ -1042,6 +1104,19 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) SDHCI_TIMEOUT_CONTROL); } +static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, @@ -1058,6 +1133,7 @@ static struct sdhci_ops sdhci_esdhc_ops = { .set_bus_width = esdhc_pltfm_set_bus_width, .set_uhs_signaling = esdhc_set_uhs_signaling, .reset = esdhc_reset, + .irq = esdhc_cqhci_irq, }; static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { @@ -1095,16 +1171,34 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) | ESDHC_BURST_LEN_EN_INCR, host->ioaddr + SDHCI_HOST_CONTROL); + /* - * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL - * TO1.1, it's harmless for MX6SL - */ - writel(readl(host->ioaddr + 0x6c) | BIT(7), + * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) & ~BIT(7), host->ioaddr + 0x6c); /* disable DLL_CTRL delay line settings */ writel(0x0, host->ioaddr + ESDHC_DLL_CTRL); + /* + * For the case of command with busy, if set the bit + * ESDHC_VEND_SPEC2_EN_BUSY_IRQ, USDHC will generate a + * transfer complete interrupt when busy is deasserted. + * When CQHCI use DCMD to send a CMD need R1b respons, + * CQHCI require to set ESDHC_VEND_SPEC2_EN_BUSY_IRQ, + * otherwise DCMD will always meet timeout waiting for + * hardware interrupt issue. + */ + if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) { + tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2); + tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ; + writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2); + + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + } + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); tmp |= ESDHC_STD_TUNING_EN | @@ -1120,10 +1214,81 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) << ESDHC_TUNING_STEP_SHIFT; } writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL); + } else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + /* + * ESDHC_STD_TUNING_EN may be configed in bootloader + * or ROM code, so clear this bit here to make sure + * the manual tuning can work. + */ + tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); + tmp &= ~ESDHC_STD_TUNING_EN; + writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL); } } } +static void esdhc_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct cqhci_host *cq_host = mmc->cqe_private; + u32 reg; + u16 mode; + int count = 10; + + /* + * CQE gets stuck if it sees Buffer Read Enable bit set, which can be + * the case after tuning, so ensure the buffer is drained. 
+ */ + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + if (count-- == 0) { + dev_warn(mmc_dev(host->mmc), + "CQE may get stuck because the Buffer Read Enable bit is set\n"); + break; + } + mdelay(1); + } + + /* + * Runtime resume will reset the entire host controller, which + * will also clear the DMAEN/BCEN of register ESDHC_MIX_CTRL. + * Here set DMAEN and BCEN when enable CMDQ. + */ + mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); + if (host->flags & SDHCI_REQ_USE_DMA) + mode |= SDHCI_TRNS_DMA; + if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) + mode |= SDHCI_TRNS_BLK_CNT_EN; + sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); + + /* + * Though Runtime resume reset the entire host controller, + * but do not impact the CQHCI side, need to clear the + * HALT bit, avoid CQHCI stuck in the first request when + * system resume back. + */ + cqhci_writel(cq_host, 0, CQHCI_CTL); + if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) + dev_err(mmc_dev(host->mmc), + "failed to exit halt state when enable CQE\n"); + + + sdhci_cqe_enable(mmc); +} + +static void esdhc_sdhci_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static const struct cqhci_host_ops esdhc_cqhci_ops = { + .enable = esdhc_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = esdhc_sdhci_dumpregs, +}; + #ifdef CONFIG_OF static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, @@ -1200,7 +1365,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL); + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); @@ -1255,6 +1420,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) of_match_device(imx_esdhc_dt_ids, &pdev->dev); struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; + struct cqhci_host *cq_host; int err; struct pltfm_imx_data *imx_data; @@ -1321,6 +1487,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) writel(0x0, host->ioaddr + ESDHC_MIX_CTRL); writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS); writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + + /* + * Link usdhc specific mmc_host_ops execute_tuning function, + * to replace the standard one in sdhci_ops. 
+ */ + host->mmc_host_ops.execute_tuning = usdhc_execute_tuning; } if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) @@ -1333,6 +1505,28 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_HS400) host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; + if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + host->mmc->caps2 |= MMC_CAP2_HS400_ES; + host->mmc_host_ops.hs400_enhanced_strobe = + esdhc_hs400_enhanced_strobe; + } + + if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) { + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + err = -ENOMEM; + goto disable_ahb_clk; + } + + cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET; + cq_host->ops = &esdhc_cqhci_ops; + + err = cqhci_init(cq_host, host->mmc, false); + if (err) + goto disable_ahb_clk; + } + if (of_id) err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); else @@ -1340,6 +1534,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (err) goto disable_ahb_clk; + host->tuning_delay = 1; + sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); @@ -1391,6 +1587,13 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) static int sdhci_esdhc_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + int ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); @@ -1401,11 +1604,19 @@ static int sdhci_esdhc_suspend(struct device *dev) static int sdhci_esdhc_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + int ret; /* re-initialize hw state in case it's lost in low power mode */ sdhci_esdhc_imx_hwinit(host); - return sdhci_resume_host(host); + ret = sdhci_resume_host(host); + if (ret) + return ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) + ret = cqhci_resume(host->mmc); + + return ret; } #endif @@ -1417,6 +1628,12 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev) struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); int ret; + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + ret = sdhci_runtime_suspend_host(host); if (ret) return ret; @@ -1460,7 +1677,10 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) if (err) goto disable_ipg_clk; - return 0; + if (host->mmc->caps2 & MMC_CAP2_CQE) + err = cqhci_resume(host->mmc); + + return err; disable_ipg_clk: if (!sdhci_sdio_irq_enabled(host)) diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index c11c18a9aacb..b1a66ca3821a 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1097,7 +1097,6 @@ static int sdhci_omap_probe(struct platform_device *pdev) goto err_put_sync; } - host->mmc_host_ops.get_ro = mmc_gpio_get_ro; host->mmc_host_ops.start_signal_voltage_switch = sdhci_omap_start_signal_voltage_switch; host->mmc_host_ops.set_ios = sdhci_omap_set_ios; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 2a6eba74b94e..99b0fec2836b 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1257,16 +1257,6 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) } #endif -static const struct sdhci_pci_fixes sdhci_o2 = { - .probe = sdhci_pci_o2_probe, - .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, - .quirks2 = 
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, - .probe_slot = sdhci_pci_o2_probe_slot, -#ifdef CONFIG_PM_SLEEP - .resume = sdhci_pci_o2_resume, -#endif -}; - static const struct sdhci_pci_fixes sdhci_jmicron = { .probe = jmicron_probe, diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index cc3ffeffd7a2..05a012a694b2 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -60,6 +60,13 @@ #define O2_SD_VENDOR_SETTING2 0x1C8 #define O2_SD_HW_TUNING_DISABLE BIT(4) +#define O2_PLL_WDT_CONTROL1 0x1CC +#define O2_PLL_FORCE_ACTIVE BIT(18) +#define O2_PLL_LOCK_STATUS BIT(14) +#define O2_PLL_SOFT_RESET BIT(12) + +#define O2_SD_DETECT_SETTING 0x324 + static void sdhci_o2_set_tuning_mode(struct sdhci_host *host) { u16 reg; @@ -283,6 +290,113 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip, host->irq = pci_irq_vector(chip->pdev, 0); } +static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host) +{ + ktime_t timeout; + u32 scratch32; + + /* Wait max 50 ms */ + timeout = ktime_add_ms(ktime_get(), 50); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE); + if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT + == (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT) + break; + + if (timedout) { + pr_err("%s: Card Detect debounce never finished.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + return; + } + udelay(10); + } +} + +static void sdhci_o2_enable_internal_clock(struct sdhci_host *host) +{ + ktime_t timeout; + u16 scratch; + u32 scratch32; + + /* PLL software reset */ + scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1); + scratch32 |= O2_PLL_SOFT_RESET; + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); + udelay(1); + scratch32 &= ~(O2_PLL_SOFT_RESET); + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); + + /* PLL force active */ + scratch32 |= O2_PLL_FORCE_ACTIVE; + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + scratch = sdhci_readw(host, O2_PLL_WDT_CONTROL1); + if (scratch & O2_PLL_LOCK_STATUS) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + goto out; + } + udelay(10); + } + + /* Wait for card detect finish */ + udelay(1); + sdhci_o2_wait_card_detect_stable(host); + +out: + /* Cancel PLL force active */ + scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1); + scratch32 &= ~O2_PLL_FORCE_ACTIVE; + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); +} + +static int sdhci_o2_get_cd(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + sdhci_o2_enable_internal_clock(host); + + return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +} + +static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk) +{ + /* Enable internal clock */ + clk |= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (sdhci_o2_get_cd(host->mmc)) { + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + } +} + +void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_o2_enable_clk(host, clk); +} + int 
sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) { struct sdhci_pci_chip *chip; @@ -314,9 +428,14 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) mmc_hostname(host->mmc)); host->flags &= ~SDHCI_SIGNALING_330; host->flags |= SDHCI_SIGNALING_180; + host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; host->mmc->caps2 |= MMC_CAP2_NO_SD; host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + pci_write_config_dword(chip->pdev, + O2_SD_DETECT_SETTING, 3); } + + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; } host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning; @@ -490,9 +609,6 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; case PCI_DEVICE_ID_O2_SEABIRD0: - if (chip->pdev->revision == 0x01) - chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; - /* fall through */ case PCI_DEVICE_ID_O2_SEABIRD1: /* UnLock WP */ ret = pci_read_config_byte(chip->pdev, @@ -550,3 +666,21 @@ int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) return sdhci_pci_resume_host(chip); } #endif + +static const struct sdhci_ops sdhci_pci_o2_ops = { + .set_clock = sdhci_pci_o2_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +const struct sdhci_pci_fixes sdhci_o2 = { + .probe = sdhci_pci_o2_probe, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .probe_slot = sdhci_pci_o2_probe_slot, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_o2_resume, +#endif + .ops = &sdhci_pci_o2_ops, +}; diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 2ef0bdca9197..4ddb69a15cd7 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -179,13 +179,9 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot) int sdhci_pci_resume_host(struct sdhci_pci_chip *chip); #endif int sdhci_pci_enable_dma(struct sdhci_host *host); -int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); -int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); -#ifdef CONFIG_PM_SLEEP -int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); -#endif extern const struct sdhci_pci_fixes sdhci_arasan; extern const struct sdhci_pci_fixes sdhci_snps; +extern const struct sdhci_pci_fixes sdhci_o2; #endif /* __SDHCI_PCI_H */ diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index 2c3827f54927..cdc8e16b4567 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -23,7 +23,6 @@ #include <linux/clk.h> #include <linux/module.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/platform_data/pxa_sdhci.h> diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index e6ace31e2a41..32e62904c0d3 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -33,6 +33,7 @@ #include <linux/ktime.h> #include "sdhci-pltfm.h" +#include "cqhci.h" /* Tegra SDHOST controller vendor register definitions */ #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100 @@ -75,6 +76,7 @@ #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31) +#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000 #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31) @@ -89,6 +91,9 @@ #define NVQUIRK_NEEDS_PAD_CONTROL BIT(7) #define 
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) +/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ +#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 + struct sdhci_tegra_soc_data { const struct sdhci_pltfm_data *pdata; u32 nvquirks; @@ -121,6 +126,8 @@ struct sdhci_tegra { struct pinctrl *pinctrl_sdmmc; struct pinctrl_state *pinctrl_state_3v3; struct pinctrl_state *pinctrl_state_1v8; + struct pinctrl_state *pinctrl_state_3v3_drv; + struct pinctrl_state *pinctrl_state_1v8_drv; struct sdhci_tegra_autocal_offsets autocal_offsets; ktime_t last_calib; @@ -128,6 +135,7 @@ struct sdhci_tegra { u32 default_tap; u32 default_trim; u32 dqs_trim; + bool enable_hwcq; }; static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) @@ -237,11 +245,6 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) } } -static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) -{ - return mmc_gpio_get_ro(host->mmc); -} - static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -411,6 +414,76 @@ static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host, sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); } +static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage, + bool state_drvupdn) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets *offsets = + &tegra_host->autocal_offsets; + struct pinctrl_state *pinctrl_drvupdn = NULL; + int ret = 0; + u8 drvup = 0, drvdn = 0; + u32 reg; + + if (!state_drvupdn) { + /* PADS Drive Strength */ + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + if (tegra_host->pinctrl_state_1v8_drv) { + pinctrl_drvupdn = + tegra_host->pinctrl_state_1v8_drv; + } else { + drvup = offsets->pull_up_1v8_timeout; + drvdn = offsets->pull_down_1v8_timeout; + } + } else { + if (tegra_host->pinctrl_state_3v3_drv) { + pinctrl_drvupdn = + tegra_host->pinctrl_state_3v3_drv; + } else { + drvup = offsets->pull_up_3v3_timeout; + drvdn = offsets->pull_down_3v3_timeout; + } + } + + if (pinctrl_drvupdn != NULL) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + pinctrl_drvupdn); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "failed pads drvupdn, ret: %d\n", ret); + } else if ((drvup) || (drvdn)) { + reg = sdhci_readl(host, + SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK; + reg |= (drvup << 20) | (drvdn << 12); + sdhci_writel(host, reg, + SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + } + + } else { + /* Dual Voltage PADS Voltage selection */ + if (!tegra_host->pad_control_available) + return 0; + + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_1v8); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 1.8V failed, ret: %d\n", ret); + } else { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_3v3); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 3.3V failed, ret: %d\n", ret); + } + } + + return ret; +} + static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -437,6 +510,7 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3; } + /* Set initial offset before auto-calibration */ tegra_sdhci_set_pad_autocal_offset(host, pdpu); card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); 
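The fixed drive-strength fallback added above in tegra_sdhci_set_padctrl() comes down to a masked read-modify-write of the SDMEM comp-pad control register. A minimal sketch of just that step, assuming the register and mask definitions introduced by this patch and using a hypothetical helper name:

static void demo_set_drvupdn(struct sdhci_host *host, u8 drvup, u8 drvdn)
{
        u32 reg = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

        /* clear the whole pull-up/pull-down offset field first */
        reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;         /* 0x07FFF000 */
        /* pull-up offset lands at bit 20, pull-down offset at bit 12 */
        reg |= (drvup << 20) | (drvdn << 12);
        sdhci_writel(host, reg, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
}

When a board supplies the new "sdmmc-3v3-drv"/"sdmmc-1v8-drv" pinctrl states, the driver selects those states instead and this register write is skipped.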
@@ -460,19 +534,15 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) if (ret) { dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n"); - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) - pdpu = offsets.pull_down_1v8_timeout << 8 | - offsets.pull_up_1v8_timeout; - else - pdpu = offsets.pull_down_3v3_timeout << 8 | - offsets.pull_up_3v3_timeout; - - /* Disable automatic calibration and use fixed offsets */ + /* Disable automatic cal and use fixed Drive Strengths */ reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); reg &= ~SDHCI_AUTO_CAL_ENABLE; sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); - tegra_sdhci_set_pad_autocal_offset(host, pdpu); + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "Setting drive strengths failed: %d\n", ret); } } @@ -511,26 +581,46 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host) err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-up-offset-3v3-timeout", &autocal->pull_up_3v3_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_3v3) && + (tegra_host->pinctrl_state_3v3_drv == NULL)) + pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_up_3v3_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-down-offset-3v3-timeout", &autocal->pull_down_3v3_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_3v3) && + (tegra_host->pinctrl_state_3v3_drv == NULL)) + pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_down_3v3_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-up-offset-1v8-timeout", &autocal->pull_up_1v8_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_1v8) && + (tegra_host->pinctrl_state_1v8_drv == NULL)) + pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_up_1v8_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-down-offset-1v8-timeout", &autocal->pull_down_1v8_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_1v8) && + (tegra_host->pinctrl_state_1v8_drv == NULL)) + pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_down_1v8_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-up-offset-sdr104", @@ -595,6 +685,20 @@ static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host) tegra_host->dqs_trim = 0x11; } +static void tegra_sdhci_parse_dt(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + + if (device_property_read_bool(host->mmc->parent, "supports-cqe")) + tegra_host->enable_hwcq = true; + else + tegra_host->enable_hwcq = false; + + tegra_sdhci_parse_pad_autocal_dt(host); + tegra_sdhci_parse_tap_and_trim(host); +} + static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -743,32 +847,6 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) return mmc_send_tuning(host->mmc, opcode, NULL); } -static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); - 
int ret; - - if (!tegra_host->pad_control_available) - return 0; - - if (voltage == MMC_SIGNAL_VOLTAGE_180) { - ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, - tegra_host->pinctrl_state_1v8); - if (ret < 0) - dev_err(mmc_dev(host->mmc), - "setting 1.8V failed, ret: %d\n", ret); - } else { - ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, - tegra_host->pinctrl_state_3v3); - if (ret < 0) - dev_err(mmc_dev(host->mmc), - "setting 3.3V failed, ret: %d\n", ret); - } - - return ret; -} - static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -778,7 +856,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, int ret = 0; if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { - ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage); + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); if (ret < 0) return ret; ret = sdhci_start_signal_voltage_switch(mmc, ios); @@ -786,7 +864,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, ret = sdhci_start_signal_voltage_switch(mmc, ios); if (ret < 0) return ret; - ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage); + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); } if (tegra_host->pad_calib_required) @@ -805,6 +883,20 @@ static int tegra_sdhci_init_pinctrl_info(struct device *dev, return -1; } + tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state( + tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv"); + if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) { + if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV) + tegra_host->pinctrl_state_1v8_drv = NULL; + } + + tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state( + tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv"); + if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) { + if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV) + tegra_host->pinctrl_state_3v3_drv = NULL; + } + tegra_host->pinctrl_state_3v3 = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3"); if (IS_ERR(tegra_host->pinctrl_state_3v3)) { @@ -836,8 +928,50 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host) tegra_host->pad_calib_required = true; } +static void sdhci_tegra_cqe_enable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 cqcfg = 0; + + /* + * Tegra SDMMC Controller design prevents write access to BLOCK_COUNT + * registers when CQE is enabled. 
+ */ + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + if (cqcfg & CQHCI_ENABLE) + cqhci_writel(cq_host, (cqcfg & ~CQHCI_ENABLE), CQHCI_CFG); + + sdhci_cqe_enable(mmc); + + if (cqcfg & CQHCI_ENABLE) + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); +} + +static void sdhci_tegra_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { + .enable = sdhci_tegra_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_tegra_dumpregs, +}; + static const struct sdhci_ops tegra_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, @@ -893,7 +1027,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = { }; static const struct sdhci_ops tegra114_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -947,7 +1080,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = { }; static const struct sdhci_ops tegra210_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra210_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -980,7 +1112,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { }; static const struct sdhci_ops tegra186_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, @@ -989,6 +1120,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = { .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, .voltage_switch = tegra_sdhci_voltage_switch, .get_max_clock = tegra_sdhci_get_max_clock, + .irq = sdhci_tegra_cqhci_irq, }; static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { @@ -1030,6 +1162,54 @@ static const struct of_device_id sdhci_tegra_dt_match[] = { }; MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match); +static int sdhci_tegra_add_host(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!tegra_host->enable_hwcq) + return sdhci_add_host(host); + + sdhci_enable_v4_mode(host); + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + + cq_host = devm_kzalloc(host->mmc->parent, + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR; + cq_host->ops = &sdhci_tegra_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_tegra_probe(struct platform_device *pdev) { const struct of_device_id *match; @@ -1077,9 +1257,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) host->mmc->caps |= MMC_CAP_1_8V_DDR; - tegra_sdhci_parse_pad_autocal_dt(host); - - 
tegra_sdhci_parse_tap_and_trim(host); + tegra_sdhci_parse_dt(host); tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", GPIOD_OUT_HIGH); @@ -1117,7 +1295,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) usleep_range(2000, 4000); - rc = sdhci_add_host(host); + rc = sdhci_tegra_add_host(host); if (rc) goto err_add_host; diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index 5b5eb53a63d2..8d07ee1b8f08 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -530,7 +530,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host, ret = true; break; } - /* else: fall through */ + /* fall through */ default: reg &= ~XENON_TIMING_ADJUST_SLOW_MODE; ret = false; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index eba9bcc92ad3..a8141ff9be03 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -883,7 +883,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, bool *too_big) { u8 count; - struct mmc_data *data = cmd->data; + struct mmc_data *data; unsigned target_timeout, current_timeout; *too_big = true; @@ -897,6 +897,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) return 0xE; + /* Unspecified command, asume max */ + if (cmd == NULL) + return 0xE; + + data = cmd->data; /* Unspecified timeout, assume max */ if (!data && !cmd->busy_timeout) return 0xE; @@ -2048,6 +2053,8 @@ static int sdhci_check_ro(struct sdhci_host *host) is_readonly = 0; else if (host->ops->get_ro) is_readonly = host->ops->get_ro(host); + else if (mmc_can_gpio_ro(host->mmc)) + is_readonly = mmc_gpio_get_ro(host->mmc); else is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_WRITE_PROTECT); @@ -2376,6 +2383,10 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) return -ETIMEDOUT; } + /* Spec does not require a delay between tuning cycles */ + if (host->tuning_delay > 0) + mdelay(host->tuning_delay); + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { if (ctrl & SDHCI_CTRL_TUNED_CLK) @@ -2383,9 +2394,6 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) break; } - /* Spec does not require a delay between tuning cycles */ - if (host->tuning_delay > 0) - mdelay(host->tuning_delay); } pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", @@ -3353,7 +3361,14 @@ void sdhci_cqe_enable(struct mmc_host *mmc) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ctrl &= ~SDHCI_CTRL_DMA_MASK; - if (host->flags & SDHCI_USE_64_BIT_DMA) + /* + * Host from V4.10 supports ADMA3 DMA type. + * ADMA3 performs integrated descriptor which is more suitable + * for cmd queuing to fetch both command and transfer descriptors. 
+ */ + if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) + ctrl |= SDHCI_CTRL_ADMA3; + else if (host->flags & SDHCI_USE_64_BIT_DMA) ctrl |= SDHCI_CTRL_ADMA64; else ctrl |= SDHCI_CTRL_ADMA32; @@ -3363,7 +3378,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc) SDHCI_BLOCK_SIZE); /* Set maximum timeout */ - sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL); + sdhci_set_timeout(host, NULL); host->ier = host->cqe_ier; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 6cc9a3c2ac66..01002cba1359 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -73,6 +73,10 @@ #define SDHCI_SPACE_AVAILABLE 0x00000400 #define SDHCI_DATA_AVAILABLE 0x00000800 #define SDHCI_CARD_PRESENT 0x00010000 +#define SDHCI_CARD_PRES_SHIFT 16 +#define SDHCI_CD_STABLE 0x00020000 +#define SDHCI_CD_LVL 0x00040000 +#define SDHCI_CD_LVL_SHIFT 18 #define SDHCI_WRITE_PROTECT 0x00080000 #define SDHCI_DATA_LVL_MASK 0x00F00000 #define SDHCI_DATA_LVL_SHIFT 20 @@ -88,6 +92,7 @@ #define SDHCI_CTRL_ADMA1 0x08 #define SDHCI_CTRL_ADMA32 0x10 #define SDHCI_CTRL_ADMA64 0x18 +#define SDHCI_CTRL_ADMA3 0x18 #define SDHCI_CTRL_8BITBUS 0x20 #define SDHCI_CTRL_CDTEST_INS 0x40 #define SDHCI_CTRL_CDTEST_EN 0x80 @@ -230,6 +235,7 @@ #define SDHCI_RETUNING_MODE_SHIFT 14 #define SDHCI_CLOCK_MUL_MASK 0x00FF0000 #define SDHCI_CLOCK_MUL_SHIFT 16 +#define SDHCI_CAN_DO_ADMA3 0x08000000 #define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */ #define SDHCI_CAPABILITIES_1 0x44 diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 8c05879850a0..eea183e90f1b 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -158,7 +158,7 @@ static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode, sdhci_set_power_noreg(host, mode, vdd); } -struct sdhci_ops sdhci_am654_ops = { +static struct sdhci_ops sdhci_am654_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, .set_uhs_signaling = sdhci_set_uhs_signaling, diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 70fadc976795..2901a5773d83 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -19,7 +19,6 @@ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> @@ -32,7 +31,6 @@ #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/of_address.h> -#include <linux/of_gpio.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index c03529e3f01a..2adb0d24360f 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); } +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) +{ + iowrite32(val, host->ctl + (addr << host->bus_shift)); +} + static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, const u32 *buf, int count) { diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 085a0fab769c..595949f1f001 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -43,6 +43,7 @@ #include <linux/regulator/consumer.h> #include <linux/mmc/sdio.h> #include 
<linux/scatterlist.h> +#include <linux/sizes.h> #include <linux/spinlock.h> #include <linux/swiotlb.h> #include <linux/workqueue.h> @@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg, return false; } -static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) +static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) { struct mmc_host *mmc = host->mmc; struct tmio_mmc_data *pdata = host->pdata; @@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) unsigned int sdio_status; if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) - return; + return false; status = sd_ctrl_read16(host, CTL_SDIO_STATUS); ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask; @@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) mmc_signal_sdio_irq(mmc); + + return ireg; } irqreturn_t tmio_mmc_irq(int irq, void *devid) @@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) if (__tmio_mmc_sdcard_irq(host, ireg, status)) return IRQ_HANDLED; - __tmio_mmc_sdio_irq(host); + if (__tmio_mmc_sdio_irq(host)) + return IRQ_HANDLED; - return IRQ_HANDLED; + return IRQ_NONE; } EXPORT_SYMBOL_GPL(tmio_mmc_irq); @@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, /* Set transfer length / blocksize */ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + if (host->mmc->max_blk_count >= SZ_64K) + sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks); + else + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); tmio_mmc_start_dma(host, data); @@ -1066,7 +1073,7 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) - mmc->ocr_avail = pdata->ocr_mask; + mmc->ocr_avail = pdata->ocr_mask; /* * try again. @@ -1287,6 +1294,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); } diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 3ba42f508014..4fd6da29489e 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -19,7 +19,6 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/clk.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/of.h> diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 6e8e7b1bb34b..79a53cb8507b 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -756,7 +756,8 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, } numvirtchips = cfi->numchips * numparts; - newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL); + newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips), + GFP_KERNEL); if (!newcfi) return -ENOMEM; shared = kmalloc_array(cfi->numchips, diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index 837b04ab96a9..839ed40625d6 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c @@ -135,7 +135,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi * our caller, and copy the appropriate data into them. 
*/ - retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL); + retcfi = kmalloc(struct_size(retcfi, chips, cfi.numchips), GFP_KERNEL); if (!retcfi) { kfree(cfi.cfiq); diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 4c94fc096696..7754803e3463 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1767,8 +1767,8 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) switch (chip_id) { case DOC_CHIPID_G3: - mtd->name = kasprintf(GFP_KERNEL, "docg3.%d", - docg3->device_id); + mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d", + docg3->device_id); if (!mtd->name) return -ENOMEM; docg3->max_block = 2047; @@ -1872,7 +1872,7 @@ nomem3: nomem2: kfree(docg3); nomem1: - return ERR_PTR(ret); + return ret ? ERR_PTR(ret) : NULL; } /** @@ -1886,7 +1886,6 @@ static void doc_release_device(struct mtd_info *mtd) mtd_device_unregister(mtd); kfree(docg3->bbt); kfree(docg3); - kfree(mtd->name); kfree(mtd); } diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index c4a1d04b8c80..651bab6d4e31 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -195,7 +195,14 @@ static int m25p_probe(struct spi_mem *spimem) spi_mem_set_drvdata(spimem, flash); flash->spimem = spimem; - if (spi->mode & SPI_RX_QUAD) { + if (spi->mode & SPI_RX_OCTAL) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; + + if (spi->mode & SPI_TX_OCTAL) + hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 | + SNOR_HWCAPS_PP_1_1_8 | + SNOR_HWCAPS_PP_1_8_8); + } else if (spi->mode & SPI_RX_QUAD) { hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; if (spi->mode & SPI_TX_QUAD) diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 46238796145f..1c97fabc4bf9 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c @@ -24,14 +24,12 @@ static unsigned long writebuf_size = 64; #define MTDRAM_TOTAL_SIZE (total_size * 1024) #define MTDRAM_ERASE_SIZE (erase_size * 1024) -#ifdef MODULE module_param(total_size, ulong, 0); MODULE_PARM_DESC(total_size, "Total device size in KiB"); module_param(erase_size, ulong, 0); MODULE_PARM_DESC(erase_size, "Device erase block size in KiB"); module_param(writebuf_size, ulong, 0); MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)"); -#endif // We could store these in the mtd structure, but we only support 1 device.. 
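The kmalloc() conversions in cfi_cmdset_0001.c and gen_probe.c above (and in qinfo_probe.c further down) replace an open-coded "sizeof(struct) + n * sizeof(element)" with struct_size() from <linux/overflow.h>. A minimal sketch of the pattern, using an illustrative struct rather than the real cfi_private/lpddr_private layouts:

struct demo_bank {
        int numchips;
        /* ... */
        struct flchip chips[];          /* flexible array member */
};

struct demo_bank *bank;
int numchips = 4;                       /* example element count */

bank = kzalloc(struct_size(bank, chips, numchips), GFP_KERNEL);
/*
 * struct_size(bank, chips, numchips) evaluates to
 * sizeof(*bank) + numchips * sizeof(bank->chips[0]), but saturates at
 * SIZE_MAX if the multiplication or addition would overflow, so an
 * oversized count can no longer wrap into a short allocation.
 */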
static struct mtd_info *mtd_info; diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 22f753e555ac..83f88b8b5d9f 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev, * Going to have to check what details I need to set and how to * get them */ - mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); mtd->type = MTD_NORFLASH; mtd->flags = MTD_WRITEABLE; mtd->size = size; diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c index 69f2112340b1..175bdc3b72f4 100644 --- a/drivers/mtd/lpddr/qinfo_probe.c +++ b/drivers/mtd/lpddr/qinfo_probe.c @@ -181,8 +181,8 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map) lpddr.numchips = 1; numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; - retlpddr = kzalloc(sizeof(struct lpddr_private) + - numvirtchips * sizeof(struct flchip), GFP_KERNEL); + retlpddr = kzalloc(struct_size(retlpddr, chips, numvirtchips), + GFP_KERNEL); if (!retlpddr) return NULL; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 999b705769a8..76b4264936ff 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -155,7 +155,6 @@ static ssize_t mtd_flags_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags); - } static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL); @@ -166,7 +165,6 @@ static ssize_t mtd_size_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)mtd->size); - } static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL); @@ -176,7 +174,6 @@ static ssize_t mtd_erasesize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize); - } static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL); @@ -186,7 +183,6 @@ static ssize_t mtd_writesize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize); - } static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL); @@ -197,7 +193,6 @@ static ssize_t mtd_subpagesize_show(struct device *dev, unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize); - } static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL); @@ -207,7 +202,6 @@ static ssize_t mtd_oobsize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize); - } static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL); @@ -226,7 +220,6 @@ static ssize_t mtd_numeraseregions_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions); - } static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show, NULL); @@ -237,7 +230,6 @@ static ssize_t mtd_name_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name); - } static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); @@ -507,6 +499,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) { struct nvmem_config config = {}; + config.id = -1; config.dev = &mtd->dev; config.name = mtd->name; config.owner = THIS_MODULE; @@ -559,6 +552,14 @@ 
int add_mtd_device(struct mtd_info *mtd) BUG_ON(mtd->writesize == 0); + /* + * MTD drivers should implement ->_{write,read}() or + * ->_{write,read}_oob(), but not both. + */ + if (WARN_ON((mtd->_write && mtd->_write_oob) || + (mtd->_read && mtd->_read_oob))) + return -EINVAL; + if (WARN_ON((!mtd->erasesize || !mtd->_erase) && !(mtd->flags & MTD_NO_ERASE))) return -EINVAL; @@ -1089,67 +1090,32 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - int ret_code; - *retlen = 0; - if (from < 0 || from >= mtd->size || len > mtd->size - from) - return -EINVAL; - if (!len) - return 0; + struct mtd_oob_ops ops = { + .len = len, + .datbuf = buf, + }; + int ret; - ledtrig_mtd_activity(); - /* - * In the absence of an error, drivers return a non-negative integer - * representing the maximum number of bitflips that were corrected on - * any one ecc region (if applicable; zero otherwise). - */ - if (mtd->_read) { - ret_code = mtd->_read(mtd, from, len, retlen, buf); - } else if (mtd->_read_oob) { - struct mtd_oob_ops ops = { - .len = len, - .datbuf = buf, - }; - - ret_code = mtd->_read_oob(mtd, from, &ops); - *retlen = ops.retlen; - } else { - return -ENOTSUPP; - } + ret = mtd_read_oob(mtd, from, &ops); + *retlen = ops.retlen; - if (unlikely(ret_code < 0)) - return ret_code; - if (mtd->ecc_strength == 0) - return 0; /* device lacks ecc */ - return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; + return ret; } EXPORT_SYMBOL_GPL(mtd_read); int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { - *retlen = 0; - if (to < 0 || to >= mtd->size || len > mtd->size - to) - return -EINVAL; - if ((!mtd->_write && !mtd->_write_oob) || - !(mtd->flags & MTD_WRITEABLE)) - return -EROFS; - if (!len) - return 0; - ledtrig_mtd_activity(); + struct mtd_oob_ops ops = { + .len = len, + .datbuf = (u8 *)buf, + }; + int ret; - if (!mtd->_write) { - struct mtd_oob_ops ops = { - .len = len, - .datbuf = (u8 *)buf, - }; - int ret; + ret = mtd_write_oob(mtd, to, &ops); + *retlen = ops.retlen; - ret = mtd->_write_oob(mtd, to, &ops); - *retlen = ops.retlen; - return ret; - } - - return mtd->_write(mtd, to, len, retlen, buf); + return ret; } EXPORT_SYMBOL_GPL(mtd_write); diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 1a55d3e3d4c5..e604625e2dfa 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -541,4 +541,21 @@ config MTD_NAND_TEGRA is supported. Extra OOB bytes when using HW ECC are currently not supported. +config MTD_NAND_STM32_FMC2 + tristate "Support for NAND controller on STM32MP SoCs" + depends on MACH_STM32MP157 || COMPILE_TEST + help + Enables support for NAND Flash chips on SoCs containing the FMC2 + NAND controller. This controller is found on STM32MP SoCs. + The controller supports a maximum 8k page size and supports + a maximum 8-bit correction error per sector of 512 bytes. + +config MTD_NAND_MESON + tristate "Support for NAND controller on Amlogic's Meson SoCs" + depends on ARCH_MESON || COMPILE_TEST + select MFD_SYSCON + help + Enables support for NAND controller on Amlogic's Meson SoCs. + This controller is found on Meson SoCs. 
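Earlier in this hunk series, mtd_read() and mtd_write() in mtdcore.c become thin wrappers that express a plain data transfer as an OOB operation; the bounds checks and bitflip/-EUCLEAN accounting they used to carry are presumably centralized in mtd_read_oob()/mtd_write_oob() after this rework. A sketch of what the new mtd_read() body amounts to:

/* a data-only request is simply an mtd_oob_ops with no .oobbuf set */
struct mtd_oob_ops ops = {
        .len    = len,
        .datbuf = buf,
};
int err;

err = mtd_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
return err;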
+ endif # MTD_NAND diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 57159b349054..5a5a72f0793e 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -56,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o +obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o +obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 555a74e15269..9d3997840889 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -876,23 +876,32 @@ static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev, { struct platform_device *pdev; struct atmel_pmecc *pmecc, **ptr; + int ret; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); + pmecc = platform_get_drvdata(pdev); + if (!pmecc) { + ret = -EPROBE_DEFER; + goto err_put_device; + } ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - get_device(&pdev->dev); - pmecc = platform_get_drvdata(pdev); + if (!ptr) { + ret = -ENOMEM; + goto err_put_device; + } *ptr = pmecc; devres_add(userdev, ptr); return pmecc; + +err_put_device: + put_device(&pdev->dev); + return ERR_PTR(ret); } static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 }; diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 6e8edc9375dd..24aeafc67cd4 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -37,9 +37,6 @@ #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ -/* MAP10 commands */ -#define DENALI_ERASE 0x01 - #define DENALI_BANK(denali) ((denali)->active_bank << 24) #define DENALI_INVALID_BANK -1 @@ -476,7 +473,7 @@ static void denali_setup_dma32(struct denali_nand_info *denali, } static int denali_pio_read(struct denali_nand_info *denali, void *buf, - size_t size, int page, int raw) + size_t size, int page) { u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; uint32_t *buf32 = (uint32_t *)buf; @@ -504,7 +501,7 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf, } static int denali_pio_write(struct denali_nand_info *denali, - const void *buf, size_t size, int page, int raw) + const void *buf, size_t size, int page) { u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; const uint32_t *buf32 = (uint32_t *)buf; @@ -525,16 +522,16 @@ static int denali_pio_write(struct denali_nand_info *denali, } static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, - size_t size, int page, int raw, int write) + size_t size, int page, int write) { if (write) - return denali_pio_write(denali, buf, size, page, raw); + return denali_pio_write(denali, buf, size, page); else - return denali_pio_read(denali, buf, size, page, raw); + return denali_pio_read(denali, buf, size, page); } static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, - size_t size, int page, int raw, int write) + size_t size, int page, int write) { dma_addr_t dma_addr; uint32_t irq_mask, irq_status, ecc_err_mask; @@ -544,7 +541,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, 
dma_addr = dma_map_single(denali->dev, buf, size, dir); if (dma_mapping_error(denali->dev, dma_addr)) { dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); - return denali_pio_xfer(denali, buf, size, page, raw, write); + return denali_pio_xfer(denali, buf, size, page, write); } if (write) { @@ -598,9 +595,9 @@ static int denali_data_xfer(struct denali_nand_info *denali, void *buf, denali->reg + TRANSFER_SPARE_REG); if (denali->dma_avail) - return denali_dma_xfer(denali, buf, size, page, raw, write); + return denali_dma_xfer(denali, buf, size, page, write); else - return denali_pio_xfer(denali, buf, size, page, raw, write); + return denali_pio_xfer(denali, buf, size, page, write); } static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, @@ -754,9 +751,6 @@ static int denali_read_oob(struct nand_chip *chip, int page) static int denali_write_oob(struct nand_chip *chip, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct denali_nand_info *denali = mtd_to_denali(mtd); - - denali_reset_irq(denali); denali_oob_xfer(mtd, chip, page, 1); @@ -903,23 +897,6 @@ static int denali_waitfunc(struct nand_chip *chip) return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; } -static int denali_erase(struct nand_chip *chip, int page) -{ - struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); - uint32_t irq_status; - - denali_reset_irq(denali); - - denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, - DENALI_ERASE); - - /* wait for erase to complete or failure to occur */ - irq_status = denali_wait_for_irq(denali, - INTR__ERASE_COMP | INTR__ERASE_FAIL); - - return irq_status & INTR__ERASE_COMP ? 0 : -EIO; -} - static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, const struct nand_data_interface *conf) { @@ -1244,7 +1221,6 @@ static int denali_attach_chip(struct nand_chip *chip) chip->ecc.write_page_raw = denali_write_page_raw; chip->ecc.read_oob = denali_read_oob; chip->ecc.write_oob = denali_write_oob; - chip->legacy.erase = denali_erase; ret = denali_multidev_fixup(denali); if (ret) diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h index 25c00601b8b3..c8c2620fc736 100644 --- a/drivers/mtd/nand/raw/denali.h +++ b/drivers/mtd/nand/raw/denali.h @@ -304,7 +304,6 @@ struct denali_nand_info { u32 irq_status; /* interrupts that have happened */ int irq; void *buf; /* for syndrome layout conversion */ - dma_addr_t dma_addr; int dma_avail; /* can support DMA? */ int devs_per_cs; /* devices connected in parallel */ int oob_skip_bytes; /* number of bytes reserved for BBM */ diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index 7c6a8a426606..0b5ae2418815 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -109,25 +109,17 @@ static int denali_dt_probe(struct platform_device *pdev) if (IS_ERR(denali->host)) return PTR_ERR(denali->host); - /* - * A single anonymous clock is supported for the backward compatibility. - * New platforms should support all the named clocks. 
- */ dt->clk = devm_clk_get(dev, "nand"); if (IS_ERR(dt->clk)) - dt->clk = devm_clk_get(dev, NULL); - if (IS_ERR(dt->clk)) { - dev_err(dev, "no clk available\n"); return PTR_ERR(dt->clk); - } dt->clk_x = devm_clk_get(dev, "nand_x"); if (IS_ERR(dt->clk_x)) - dt->clk_x = NULL; + return PTR_ERR(dt->clk_x); dt->clk_ecc = devm_clk_get(dev, "ecc"); if (IS_ERR(dt->clk_ecc)) - dt->clk_ecc = NULL; + return PTR_ERR(dt->clk_ecc); ret = clk_prepare_enable(dt->clk); if (ret) @@ -141,19 +133,8 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) goto out_disable_clk_x; - if (dt->clk_x) { - denali->clk_rate = clk_get_rate(dt->clk); - denali->clk_x_rate = clk_get_rate(dt->clk_x); - } else { - /* - * Hardcode the clock rates for the backward compatibility. - * This works for both SOCFPGA and UniPhier. - */ - dev_notice(dev, - "necessary clock is missing. default clock rates are used.\n"); - denali->clk_rate = 50000000; - denali->clk_x_rate = 200000000; - } + denali->clk_rate = clk_get_rate(dt->clk); + denali->clk_x_rate = clk_get_rate(dt->clk_x); ret = denali_init(denali); if (ret) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index c9149a37f8f0..6c7ca41354be 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -965,6 +965,19 @@ static const struct nand_controller_ops fsmc_nand_controller_ops = { .setup_data_interface = fsmc_setup_data_interface, }; +/** + * fsmc_nand_disable() - Disables the NAND bank + * @host: The instance to disable + */ +static void fsmc_nand_disable(struct fsmc_nand_data *host) +{ + u32 val; + + val = readl(host->regs_va + FSMC_PC); + val &= ~FSMC_ENABLE; + writel(val, host->regs_va + FSMC_PC); +} + /* * fsmc_nand_probe - Probe function * @pdev: platform device structure @@ -1120,6 +1133,7 @@ release_dma_read_chan: if (host->mode == USE_DMA_ACCESS) dma_release_channel(host->read_dma_chan); disable_clk: + fsmc_nand_disable(host); clk_disable_unprepare(host->clk); return ret; @@ -1134,6 +1148,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) if (host) { nand_release(&host->nand); + fsmc_nand_disable(host); if (host->mode == USE_DMA_ACCESS) { dma_release_channel(host->write_dma_chan); @@ -1164,6 +1179,7 @@ static int fsmc_nand_resume(struct device *dev) clk_prepare_enable(host->clk); if (host->dev_timings) fsmc_nand_setup(host, host->dev_timings); + nand_reset(&host->nand, 0); } return 0; diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/jz4780_bch.c index 7201827809e9..c5f74ed85862 100644 --- a/drivers/mtd/nand/raw/jz4780_bch.c +++ b/drivers/mtd/nand/raw/jz4780_bch.c @@ -281,12 +281,15 @@ static struct jz4780_bch *jz4780_bch_get(struct device_node *np) struct jz4780_bch *bch; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); - get_device(&pdev->dev); - bch = platform_get_drvdata(pdev); + if (!bch) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + clk_prepare_enable(bch->clk); return bch; diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 84283c6bb0ff..f38e5c1b87e4 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2550,9 +2550,8 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, } /* Alloc the nand chip structure */ - marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) + - (nsels * - sizeof(struct marvell_nand_chip_sel)), + marvell_nand = devm_kzalloc(dev, + 
struct_size(marvell_nand, sels, nsels), GFP_KERNEL); if (!marvell_nand) { dev_err(dev, "could not allocate chip structure\n"); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c new file mode 100644 index 000000000000..3e8aa71407b5 --- /dev/null +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -0,0 +1,1464 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Amlogic Meson Nand Flash Controller Driver + * + * Copyright (c) 2018 Amlogic, inc. + * Author: Liang Yang <liang.yang@amlogic.com> + */ + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/mtd/rawnand.h> +#include <linux/mtd/mtd.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/iopoll.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/sched/task_stack.h> + +#define NFC_REG_CMD 0x00 +#define NFC_CMD_IDLE (0xc << 14) +#define NFC_CMD_CLE (0x5 << 14) +#define NFC_CMD_ALE (0x6 << 14) +#define NFC_CMD_ADL ((0 << 16) | (3 << 20)) +#define NFC_CMD_ADH ((1 << 16) | (3 << 20)) +#define NFC_CMD_AIL ((2 << 16) | (3 << 20)) +#define NFC_CMD_AIH ((3 << 16) | (3 << 20)) +#define NFC_CMD_SEED ((8 << 16) | (3 << 20)) +#define NFC_CMD_M2N ((0 << 17) | (2 << 20)) +#define NFC_CMD_N2M ((1 << 17) | (2 << 20)) +#define NFC_CMD_RB BIT(20) +#define NFC_CMD_SCRAMBLER_ENABLE BIT(19) +#define NFC_CMD_SCRAMBLER_DISABLE 0 +#define NFC_CMD_SHORTMODE_DISABLE 0 +#define NFC_CMD_RB_INT BIT(14) + +#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0)) + +#define NFC_REG_CFG 0x04 +#define NFC_REG_DADR 0x08 +#define NFC_REG_IADR 0x0c +#define NFC_REG_BUF 0x10 +#define NFC_REG_INFO 0x14 +#define NFC_REG_DC 0x18 +#define NFC_REG_ADR 0x1c +#define NFC_REG_DL 0x20 +#define NFC_REG_DH 0x24 +#define NFC_REG_CADR 0x28 +#define NFC_REG_SADR 0x2c +#define NFC_REG_PINS 0x30 +#define NFC_REG_VER 0x38 + +#define NFC_RB_IRQ_EN BIT(21) + +#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \ + ( \ + (cmd_dir) | \ + ((ran) << 19) | \ + ((bch) << 14) | \ + ((short_mode) << 13) | \ + (((page_size) & 0x7f) << 6) | \ + ((pages) & 0x3f) \ + ) + +#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff)) +#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff)) +#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff)) +#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff)) + +#define DMA_DIR(dir) ((dir) ? 
NFC_CMD_N2M : NFC_CMD_M2N) + +#define ECC_CHECK_RETURN_FF (-1) + +#define NAND_CE0 (0xe << 10) +#define NAND_CE1 (0xd << 10) + +#define DMA_BUSY_TIMEOUT 0x100000 +#define CMD_FIFO_EMPTY_TIMEOUT 1000 + +#define MAX_CE_NUM 2 + +/* eMMC clock register, misc control */ +#define CLK_SELECT_NAND BIT(31) + +#define NFC_CLK_CYCLE 6 + +/* nand flash controller delay 3 ns */ +#define NFC_DEFAULT_DELAY 3000 + +#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff) +#define MAX_CYCLE_ADDRS 5 +#define DIRREAD 1 +#define DIRWRITE 0 + +#define ECC_PARITY_BCH8_512B 14 +#define ECC_COMPLETE BIT(31) +#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0)) +#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0)) +#define ECC_UNCORRECTABLE 0x3f + +#define PER_INFO_BYTE 8 + +struct meson_nfc_nand_chip { + struct list_head node; + struct nand_chip nand; + unsigned long clk_rate; + unsigned long level1_divider; + u32 bus_timing; + u32 twb; + u32 tadl; + u32 tbers_max; + + u32 bch_mode; + u8 *data_buf; + __le64 *info_buf; + u32 nsels; + u8 sels[0]; +}; + +struct meson_nand_ecc { + u32 bch; + u32 strength; +}; + +struct meson_nfc_data { + const struct nand_ecc_caps *ecc_caps; +}; + +struct meson_nfc_param { + u32 chip_select; + u32 rb_select; +}; + +struct nand_rw_cmd { + u32 cmd0; + u32 addrs[MAX_CYCLE_ADDRS]; + u32 cmd1; +}; + +struct nand_timing { + u32 twb; + u32 tadl; + u32 tbers_max; +}; + +struct meson_nfc { + struct nand_controller controller; + struct clk *core_clk; + struct clk *device_clk; + struct clk *phase_tx; + struct clk *phase_rx; + + unsigned long clk_rate; + u32 bus_timing; + + struct device *dev; + void __iomem *reg_base; + struct regmap *reg_clk; + struct completion completion; + struct list_head chips; + const struct meson_nfc_data *data; + struct meson_nfc_param param; + struct nand_timing timing; + union { + int cmd[32]; + struct nand_rw_cmd rw; + } cmdfifo; + + dma_addr_t daddr; + dma_addr_t iaddr; + + unsigned long assigned_cs; +}; + +enum { + NFC_ECC_BCH8_1K = 2, + NFC_ECC_BCH24_1K, + NFC_ECC_BCH30_1K, + NFC_ECC_BCH40_1K, + NFC_ECC_BCH50_1K, + NFC_ECC_BCH60_1K, +}; + +#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)} + +static struct meson_nand_ecc meson_ecc[] = { + MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8), + MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24), + MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30), + MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40), + MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50), + MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60), +}; + +static int meson_nand_calc_ecc_bytes(int step_size, int strength) +{ + int ecc_bytes; + + if (step_size == 512 && strength == 8) + return ECC_PARITY_BCH8_512B; + + ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8); + ecc_bytes = ALIGN(ecc_bytes, 2); + + return ecc_bytes; +} + +NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps, + meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60); +NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps, + meson_nand_calc_ecc_bytes, 1024, 8); + +static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand) +{ + return container_of(nand, struct meson_nfc_nand_chip, nand); +} + +static void meson_nfc_select_chip(struct nand_chip *nand, int chip) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + int ret, value; + + if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels)) + return; + + nfc->param.chip_select = meson_chip->sels[chip] ? 
NAND_CE1 : NAND_CE0;
+	nfc->param.rb_select = nfc->param.chip_select;
+	nfc->timing.twb = meson_chip->twb;
+	nfc->timing.tadl = meson_chip->tadl;
+	nfc->timing.tbers_max = meson_chip->tbers_max;
+
+	if (nfc->clk_rate != meson_chip->clk_rate) {
+		ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate);
+		if (ret) {
+			dev_err(nfc->dev, "failed to set clock rate\n");
+			return;
+		}
+		nfc->clk_rate = meson_chip->clk_rate;
+	}
+	if (nfc->bus_timing != meson_chip->bus_timing) {
+		value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5);
+		writel(value, nfc->reg_base + NFC_REG_CFG);
+		writel((1 << 31), nfc->reg_base + NFC_REG_CMD);
+		nfc->bus_timing = meson_chip->bus_timing;
+	}
+}
+
+static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time)
+{
+	writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff),
+	       nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
+{
+	writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)),
+	       nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+				 int scrambler)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+	u32 bch = meson_chip->bch_mode, cmd;
+	int len = mtd->writesize, pagesize, pages;
+
+	pagesize = nand->ecc.size;
+
+	if (raw) {
+		len = mtd->writesize + mtd->oobsize;
+		cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+		return;
+	}
+
+	pages = len / nand->ecc.size;
+
+	cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
+		       NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+
+	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_drain_cmd(struct meson_nfc *nfc)
+{
+	/*
+	 * Insert two commands to make sure all valid commands are finished.
+	 *
+	 * The NAND flash controller is designed as a two-stage pipeline -
+	 *  a) fetch and b) execute.
+	 * There might be cases when the driver sees that the command queue is
+	 * empty, but the NAND flash controller still has two commands buffered:
+	 * one is fetched into the NFC request queue (ready to run), and another
+	 * is actively executing. So pushing 2 "IDLE" commands guarantees that
+	 * the pipeline is emptied.
+ */ + meson_nfc_cmd_idle(nfc, 0); + meson_nfc_cmd_idle(nfc, 0); +} + +static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc, + unsigned int timeout_ms) +{ + u32 cmd_size = 0; + int ret; + + /* wait cmd fifo is empty */ + ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size, + !NFC_CMD_GET_SIZE(cmd_size), + 10, timeout_ms * 1000); + if (ret) + dev_err(nfc->dev, "wait for empty CMD FIFO time out\n"); + + return ret; +} + +static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc) +{ + meson_nfc_drain_cmd(nfc); + + return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT); +} + +static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int len; + + len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i; + + return meson_chip->data_buf + len; +} + +static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int len, temp; + + temp = nand->ecc.size + nand->ecc.bytes; + len = (temp + 2) * i; + + return meson_chip->data_buf + len; +} + +static void meson_nfc_get_data_oob(struct nand_chip *nand, + u8 *buf, u8 *oobbuf) +{ + int i, oob_len = 0; + u8 *dsrc, *osrc; + + oob_len = nand->ecc.bytes + 2; + for (i = 0; i < nand->ecc.steps; i++) { + if (buf) { + dsrc = meson_nfc_data_ptr(nand, i); + memcpy(buf, dsrc, nand->ecc.size); + buf += nand->ecc.size; + } + osrc = meson_nfc_oob_ptr(nand, i); + memcpy(oobbuf, osrc, oob_len); + oobbuf += oob_len; + } +} + +static void meson_nfc_set_data_oob(struct nand_chip *nand, + const u8 *buf, u8 *oobbuf) +{ + int i, oob_len = 0; + u8 *dsrc, *osrc; + + oob_len = nand->ecc.bytes + 2; + for (i = 0; i < nand->ecc.steps; i++) { + if (buf) { + dsrc = meson_nfc_data_ptr(nand, i); + memcpy(dsrc, buf, nand->ecc.size); + buf += nand->ecc.size; + } + osrc = meson_nfc_oob_ptr(nand, i); + memcpy(osrc, oobbuf, oob_len); + oobbuf += oob_len; + } +} + +static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms) +{ + u32 cmd, cfg; + int ret = 0; + + meson_nfc_cmd_idle(nfc, nfc->timing.twb); + meson_nfc_drain_cmd(nfc); + meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT); + + cfg = readl(nfc->reg_base + NFC_REG_CFG); + cfg |= NFC_RB_IRQ_EN; + writel(cfg, nfc->reg_base + NFC_REG_CFG); + + init_completion(&nfc->completion); + + /* use the max erase time as the maximum clock for waiting R/B */ + cmd = NFC_CMD_RB | NFC_CMD_RB_INT + | nfc->param.chip_select | nfc->timing.tbers_max; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + ret = wait_for_completion_timeout(&nfc->completion, + msecs_to_jiffies(timeout_ms)); + if (ret == 0) + ret = -1; + + return ret; +} + +static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + __le64 *info; + int i, count; + + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { + info = &meson_chip->info_buf[i]; + *info |= oob_buf[count]; + *info |= oob_buf[count + 1] << 8; + } +} + +static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + __le64 *info; + int i, count; + + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { + info = &meson_chip->info_buf[i]; + oob_buf[count] = *info; + oob_buf[count + 1] = *info >> 8; + } +} + +static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips, + u64 *correct_bitmap) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip 
*meson_chip = to_meson_nand(nand);
+	__le64 *info;
+	int ret = 0, i;
+
+	for (i = 0; i < nand->ecc.steps; i++) {
+		info = &meson_chip->info_buf[i];
+		if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
+			mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
+			*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
+			*correct_bitmap |= 1 << i;
+			continue;
+		}
+		if ((nand->options & NAND_NEED_SCRAMBLING) &&
+		    ECC_ZERO_CNT(*info) < nand->ecc.strength) {
+			mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
+			*bitflips = max_t(u32, *bitflips,
+					  ECC_ZERO_CNT(*info));
+			ret = ECC_CHECK_RETURN_FF;
+		} else {
+			ret = -EBADMSG;
+		}
+	}
+	return ret;
+}
+
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, u8 *databuf,
+				      int datalen, u8 *infobuf, int infolen,
+				      enum dma_data_direction dir)
+{
+	struct meson_nfc *nfc = nand_get_controller_data(nand);
+	u32 cmd;
+	int ret = 0;
+
+	nfc->daddr = dma_map_single(nfc->dev, (void *)databuf, datalen, dir);
+	ret = dma_mapping_error(nfc->dev, nfc->daddr);
+	if (ret) {
+		dev_err(nfc->dev, "DMA mapping error\n");
+		return ret;
+	}
+	cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr);
+	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+	cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr);
+	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+	if (infobuf) {
+		nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir);
+		ret = dma_mapping_error(nfc->dev, nfc->iaddr);
+		if (ret) {
+			dev_err(nfc->dev, "DMA mapping error\n");
+			dma_unmap_single(nfc->dev,
+					 nfc->daddr, datalen, dir);
+			return ret;
+		}
+		cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+		cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr);
+		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+	}
+
+	return ret;
+}
+
+static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+					 int infolen, int datalen,
+					 enum dma_data_direction dir)
+{
+	struct meson_nfc *nfc = nand_get_controller_data(nand);
+
+	dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+	if (infolen)
+		dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+}
+
+static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+	struct meson_nfc *nfc = nand_get_controller_data(nand);
+	int ret = 0;
+	u32 cmd;
+	u8 *info;
+
+	info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+	ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
+					 PER_INFO_BYTE, DMA_FROM_DEVICE);
+	if (ret)
+		return ret;
+
+	cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+	meson_nfc_drain_cmd(nfc);
+	meson_nfc_wait_cmd_finish(nfc, 1000);
+	meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+	kfree(info);
+
+	return ret;
+}
+
+static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+	struct meson_nfc *nfc = nand_get_controller_data(nand);
+	int ret = 0;
+	u32 cmd;
+
+	ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL,
+					 0, DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+	meson_nfc_drain_cmd(nfc);
+	meson_nfc_wait_cmd_finish(nfc, 1000);
+	meson_nfc_dma_buffer_release(nand, len, 0, DMA_TO_DEVICE);
+
+	return ret;
+}
+
+static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
+						int page, bool in)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct meson_nfc *nfc = nand_get_controller_data(nand);
+	const struct nand_sdr_timings *sdr =
+		nand_get_sdr_timings(&nand->data_interface);
+	u32 *addrs = nfc->cmdfifo.rw.addrs;
+	u32 cs = nfc->param.chip_select;
+	u32 cmd0, cmd_num,
row_start; + int ret = 0, i; + + cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int); + + cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN; + nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0; + + addrs[0] = cs | NFC_CMD_ALE | 0; + if (mtd->writesize <= 512) { + cmd_num--; + row_start = 1; + } else { + addrs[1] = cs | NFC_CMD_ALE | 0; + row_start = 2; + } + + addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0); + addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1); + + if (nand->options & NAND_ROW_ADDR_3) + addrs[row_start + 2] = + cs | NFC_CMD_ALE | ROW_ADDER(page, 2); + else + cmd_num--; + + /* subtract cmd1 */ + cmd_num--; + + for (i = 0; i < cmd_num; i++) + writel_relaxed(nfc->cmdfifo.cmd[i], + nfc->reg_base + NFC_REG_CMD); + + if (in) { + nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART; + writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD); + meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max)); + } else { + meson_nfc_cmd_idle(nfc, nfc->timing.tadl); + } + + return ret; +} + +static int meson_nfc_write_page_sub(struct nand_chip *nand, + int page, int raw) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(&nand->data_interface); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + int data_len, info_len; + u32 cmd; + int ret; + + meson_nfc_select_chip(nand, nand->cur_cs); + + data_len = mtd->writesize + mtd->oobsize; + info_len = nand->ecc.steps * PER_INFO_BYTE; + + ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE); + if (ret) + return ret; + + ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf, + data_len, (u8 *)meson_chip->info_buf, + info_len, DMA_TO_DEVICE); + if (ret) + return ret; + + if (nand->options & NAND_NEED_SCRAMBLING) { + meson_nfc_cmd_seed(nfc, page); + meson_nfc_cmd_access(nand, raw, DIRWRITE, + NFC_CMD_SCRAMBLER_ENABLE); + } else { + meson_nfc_cmd_access(nand, raw, DIRWRITE, + NFC_CMD_SCRAMBLER_DISABLE); + } + + cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max)); + + meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE); + + return ret; +} + +static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf, + int oob_required, int page) +{ + u8 *oob_buf = nand->oob_poi; + + meson_nfc_set_data_oob(nand, buf, oob_buf); + + return meson_nfc_write_page_sub(nand, page, 1); +} + +static int meson_nfc_write_page_hwecc(struct nand_chip *nand, + const u8 *buf, int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + u8 *oob_buf = nand->oob_poi; + + memcpy(meson_chip->data_buf, buf, mtd->writesize); + memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE); + meson_nfc_set_user_byte(nand, oob_buf); + + return meson_nfc_write_page_sub(nand, page, 0); +} + +static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc, + struct nand_chip *nand, int raw) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + __le64 *info; + u32 neccpages; + int ret; + + neccpages = raw ? 
1 : nand->ecc.steps; + info = &meson_chip->info_buf[neccpages - 1]; + do { + usleep_range(10, 15); + /* info is updated by nfc dma engine*/ + smp_rmb(); + ret = *info & ECC_COMPLETE; + } while (!ret); +} + +static int meson_nfc_read_page_sub(struct nand_chip *nand, + int page, int raw) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int data_len, info_len; + int ret; + + meson_nfc_select_chip(nand, nand->cur_cs); + + data_len = mtd->writesize + mtd->oobsize; + info_len = nand->ecc.steps * PER_INFO_BYTE; + + ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD); + if (ret) + return ret; + + ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf, + data_len, (u8 *)meson_chip->info_buf, + info_len, DMA_FROM_DEVICE); + if (ret) + return ret; + + if (nand->options & NAND_NEED_SCRAMBLING) { + meson_nfc_cmd_seed(nfc, page); + meson_nfc_cmd_access(nand, raw, DIRREAD, + NFC_CMD_SCRAMBLER_ENABLE); + } else { + meson_nfc_cmd_access(nand, raw, DIRREAD, + NFC_CMD_SCRAMBLER_DISABLE); + } + + ret = meson_nfc_wait_dma_finish(nfc); + meson_nfc_check_ecc_pages_valid(nfc, nand, raw); + + meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE); + + return ret; +} + +static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf, + int oob_required, int page) +{ + u8 *oob_buf = nand->oob_poi; + int ret; + + ret = meson_nfc_read_page_sub(nand, page, 1); + if (ret) + return ret; + + meson_nfc_get_data_oob(nand, buf, oob_buf); + + return 0; +} + +static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; + u64 correct_bitmap = 0; + u32 bitflips = 0; + u8 *oob_buf = nand->oob_poi; + int ret, i; + + ret = meson_nfc_read_page_sub(nand, page, 0); + if (ret) + return ret; + + meson_nfc_get_user_byte(nand, oob_buf); + ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap); + if (ret == ECC_CHECK_RETURN_FF) { + if (buf) + memset(buf, 0xff, mtd->writesize); + memset(oob_buf, 0xff, mtd->oobsize); + } else if (ret < 0) { + if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) { + mtd->ecc_stats.failed++; + return bitflips; + } + ret = meson_nfc_read_page_raw(nand, buf, 0, page); + if (ret) + return ret; + + for (i = 0; i < nand->ecc.steps ; i++) { + u8 *data = buf + i * ecc->size; + u8 *oob = nand->oob_poi + i * (ecc->bytes + 2); + + if (correct_bitmap & (1 << i)) + continue; + ret = nand_check_erased_ecc_chunk(data, ecc->size, + oob, ecc->bytes + 2, + NULL, 0, + ecc->strength); + if (ret < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += ret; + bitflips = max_t(u32, bitflips, ret); + } + } + } else if (buf && buf != meson_chip->data_buf) { + memcpy(buf, meson_chip->data_buf, mtd->writesize); + } + + return bitflips; +} + +static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page) +{ + return meson_nfc_read_page_raw(nand, NULL, 1, page); +} + +static int meson_nfc_read_oob(struct nand_chip *nand, int page) +{ + return meson_nfc_read_page_hwecc(nand, NULL, 1, page); +} + +static bool meson_nfc_is_buffer_dma_safe(const void *buffer) +{ + if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer))) + return true; + return false; +} + +static void * +meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr) +{ + if 
(WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR)) + return NULL; + + if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in)) + return instr->ctx.data.buf.in; + + return kzalloc(instr->ctx.data.len, GFP_KERNEL); +} + +static void +meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr, + void *buf) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) || + WARN_ON(!buf)) + return; + + if (buf == instr->ctx.data.buf.in) + return; + + memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len); + kfree(buf); +} + +static void * +meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR)) + return NULL; + + if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out)) + return (void *)instr->ctx.data.buf.out; + + return kmemdup(instr->ctx.data.buf.out, + instr->ctx.data.len, GFP_KERNEL); +} + +static void +meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr, + const void *buf) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) || + WARN_ON(!buf)) + return; + + if (buf != instr->ctx.data.buf.out) + kfree(buf); +} + +static int meson_nfc_exec_op(struct nand_chip *nand, + const struct nand_operation *op, bool check_only) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + const struct nand_op_instr *instr = NULL; + void *buf; + u32 op_id, delay_idle, cmd; + int i; + + meson_nfc_select_chip(nand, op->cs); + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns), + meson_chip->level1_divider * + NFC_CLK_CYCLE); + switch (instr->type) { + case NAND_OP_CMD_INSTR: + cmd = nfc->param.chip_select | NFC_CMD_CLE; + cmd |= instr->ctx.cmd.opcode & 0xff; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + meson_nfc_cmd_idle(nfc, delay_idle); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + cmd = nfc->param.chip_select | NFC_CMD_ALE; + cmd |= instr->ctx.addr.addrs[i] & 0xff; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + } + meson_nfc_cmd_idle(nfc, delay_idle); + break; + + case NAND_OP_DATA_IN_INSTR: + buf = meson_nand_op_get_dma_safe_input_buf(instr); + if (!buf) + return -ENOMEM; + meson_nfc_read_buf(nand, buf, instr->ctx.data.len); + meson_nand_op_put_dma_safe_input_buf(instr, buf); + break; + + case NAND_OP_DATA_OUT_INSTR: + buf = meson_nand_op_get_dma_safe_output_buf(instr); + if (!buf) + return -ENOMEM; + meson_nfc_write_buf(nand, buf, instr->ctx.data.len); + meson_nand_op_put_dma_safe_output_buf(instr, buf); + break; + + case NAND_OP_WAITRDY_INSTR: + meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms); + if (instr->delay_ns) + meson_nfc_cmd_idle(nfc, delay_idle); + break; + } + } + meson_nfc_wait_cmd_finish(nfc, 1000); + return 0; +} + +static int meson_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (section >= nand->ecc.steps) + return -ERANGE; + + oobregion->offset = 2 + (section * (2 + nand->ecc.bytes)); + oobregion->length = nand->ecc.bytes; + + return 0; +} + +static int meson_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (section >= nand->ecc.steps) + return -ERANGE; + + oobregion->offset = section * (2 + nand->ecc.bytes); + oobregion->length = 2; + + return 0; +} + +static const struct mtd_ooblayout_ops 
meson_ooblayout_ops = { + .ecc = meson_ooblayout_ecc, + .free = meson_ooblayout_free, +}; + +static int meson_nfc_clk_init(struct meson_nfc *nfc) +{ + int ret; + + /* request core clock */ + nfc->core_clk = devm_clk_get(nfc->dev, "core"); + if (IS_ERR(nfc->core_clk)) { + dev_err(nfc->dev, "failed to get core clock\n"); + return PTR_ERR(nfc->core_clk); + } + + nfc->device_clk = devm_clk_get(nfc->dev, "device"); + if (IS_ERR(nfc->device_clk)) { + dev_err(nfc->dev, "failed to get device clock\n"); + return PTR_ERR(nfc->device_clk); + } + + nfc->phase_tx = devm_clk_get(nfc->dev, "tx"); + if (IS_ERR(nfc->phase_tx)) { + dev_err(nfc->dev, "failed to get TX clk\n"); + return PTR_ERR(nfc->phase_tx); + } + + nfc->phase_rx = devm_clk_get(nfc->dev, "rx"); + if (IS_ERR(nfc->phase_rx)) { + dev_err(nfc->dev, "failed to get RX clk\n"); + return PTR_ERR(nfc->phase_rx); + } + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + regmap_update_bits(nfc->reg_clk, + 0, CLK_SELECT_NAND, CLK_SELECT_NAND); + + ret = clk_prepare_enable(nfc->core_clk); + if (ret) { + dev_err(nfc->dev, "failed to enable core clock\n"); + return ret; + } + + ret = clk_prepare_enable(nfc->device_clk); + if (ret) { + dev_err(nfc->dev, "failed to enable device clock\n"); + goto err_device_clk; + } + + ret = clk_prepare_enable(nfc->phase_tx); + if (ret) { + dev_err(nfc->dev, "failed to enable TX clock\n"); + goto err_phase_tx; + } + + ret = clk_prepare_enable(nfc->phase_rx); + if (ret) { + dev_err(nfc->dev, "failed to enable RX clock\n"); + goto err_phase_rx; + } + + ret = clk_set_rate(nfc->device_clk, 24000000); + if (ret) + goto err_phase_rx; + + return 0; +err_phase_rx: + clk_disable_unprepare(nfc->phase_tx); +err_phase_tx: + clk_disable_unprepare(nfc->device_clk); +err_device_clk: + clk_disable_unprepare(nfc->core_clk); + return ret; +} + +static void meson_nfc_disable_clk(struct meson_nfc *nfc) +{ + clk_disable_unprepare(nfc->phase_rx); + clk_disable_unprepare(nfc->phase_tx); + clk_disable_unprepare(nfc->device_clk); + clk_disable_unprepare(nfc->core_clk); +} + +static void meson_nfc_free_buffer(struct nand_chip *nand) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + + kfree(meson_chip->info_buf); + kfree(meson_chip->data_buf); +} + +static int meson_chip_buffer_init(struct nand_chip *nand) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + u32 page_bytes, info_bytes, nsectors; + + nsectors = mtd->writesize / nand->ecc.size; + + page_bytes = mtd->writesize + mtd->oobsize; + info_bytes = nsectors * PER_INFO_BYTE; + + meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL); + if (!meson_chip->data_buf) + return -ENOMEM; + + meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL); + if (!meson_chip->info_buf) { + kfree(meson_chip->data_buf); + return -ENOMEM; + } + + return 0; +} + +static +int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline, + const struct nand_data_interface *conf) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + const struct nand_sdr_timings *timings; + u32 div, bt_min, bt_max, tbers_clocks; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return -ENOTSUPP; + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE); + bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div; + bt_max = (NFC_DEFAULT_DELAY + timings->tRHOH_min + + timings->tRC_min / 2) / div; + + meson_chip->twb = 
DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max), + div * NFC_CLK_CYCLE); + meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min), + div * NFC_CLK_CYCLE); + tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max), + div * NFC_CLK_CYCLE); + meson_chip->tbers_max = ilog2(tbers_clocks); + if (!is_power_of_2(tbers_clocks)) + meson_chip->tbers_max++; + + bt_min = DIV_ROUND_UP(bt_min, 1000); + bt_max = DIV_ROUND_UP(bt_max, 1000); + + if (bt_max < bt_min) + return -EINVAL; + + meson_chip->level1_divider = div; + meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider; + meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1; + + return 0; +} + +static int meson_nand_bch_mode(struct nand_chip *nand) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int i; + + if (nand->ecc.strength > 60 || nand->ecc.strength < 8) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) { + if (meson_ecc[i].strength == nand->ecc.strength) { + meson_chip->bch_mode = meson_ecc[i].bch; + return 0; + } + } + + return -EINVAL; +} + +static void meson_nand_detach_chip(struct nand_chip *nand) +{ + meson_nfc_free_buffer(nand); +} + +static int meson_nand_attach_chip(struct nand_chip *nand) +{ + struct meson_nfc *nfc = nand_get_controller_data(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct mtd_info *mtd = nand_to_mtd(nand); + int nsectors = mtd->writesize / 1024; + int ret; + + if (!mtd->name) { + mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, + "%s:nand%d", + dev_name(nfc->dev), + meson_chip->sels[0]); + if (!mtd->name) + return -ENOMEM; + } + + if (nand->bbt_options & NAND_BBT_USE_FLASH) + nand->bbt_options |= NAND_BBT_NO_OOB; + + nand->options |= NAND_NO_SUBPAGE_WRITE; + + ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps, + mtd->oobsize - 2 * nsectors); + if (ret) { + dev_err(nfc->dev, "failed to ECC init\n"); + return -EINVAL; + } + + ret = meson_nand_bch_mode(nand); + if (ret) + return -EINVAL; + + nand->ecc.mode = NAND_ECC_HW; + nand->ecc.write_page_raw = meson_nfc_write_page_raw; + nand->ecc.write_page = meson_nfc_write_page_hwecc; + nand->ecc.write_oob_raw = nand_write_oob_std; + nand->ecc.write_oob = nand_write_oob_std; + + nand->ecc.read_page_raw = meson_nfc_read_page_raw; + nand->ecc.read_page = meson_nfc_read_page_hwecc; + nand->ecc.read_oob_raw = meson_nfc_read_oob_raw; + nand->ecc.read_oob = meson_nfc_read_oob; + + if (nand->options & NAND_BUSWIDTH_16) { + dev_err(nfc->dev, "16bits bus width not supported"); + return -EINVAL; + } + ret = meson_chip_buffer_init(nand); + if (ret) + return -ENOMEM; + + return ret; +} + +static const struct nand_controller_ops meson_nand_controller_ops = { + .attach_chip = meson_nand_attach_chip, + .detach_chip = meson_nand_detach_chip, + .setup_data_interface = meson_nfc_setup_data_interface, + .exec_op = meson_nfc_exec_op, +}; + +static int +meson_nfc_nand_chip_init(struct device *dev, + struct meson_nfc *nfc, struct device_node *np) +{ + struct meson_nfc_nand_chip *meson_chip; + struct nand_chip *nand; + struct mtd_info *mtd; + int ret, i; + u32 tmp, nsels; + + if (!of_get_property(np, "reg", &nsels)) + return -EINVAL; + + nsels /= sizeof(u32); + if (!nsels || nsels > MAX_CE_NUM) { + dev_err(dev, "invalid register property size\n"); + return -EINVAL; + } + + meson_chip = devm_kzalloc(dev, + sizeof(*meson_chip) + (nsels * sizeof(u8)), + GFP_KERNEL); + if (!meson_chip) + return -ENOMEM; + + meson_chip->nsels = nsels; + + for (i = 0; i < nsels; i++) { + ret = of_property_read_u32_index(np, "reg", 
i, &tmp); + if (ret) { + dev_err(dev, "could not retrieve register property: %d\n", + ret); + return ret; + } + + if (test_and_set_bit(tmp, &nfc->assigned_cs)) { + dev_err(dev, "CS %d already assigned\n", tmp); + return -EINVAL; + } + } + + nand = &meson_chip->nand; + nand->controller = &nfc->controller; + nand->controller->ops = &meson_nand_controller_ops; + nand_set_flash_node(nand, np); + nand_set_controller_data(nand, nfc); + + nand->options |= NAND_USE_BOUNCE_BUFFER; + mtd = nand_to_mtd(nand); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; + + ret = nand_scan(nand, nsels); + if (ret) + return ret; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "failed to register MTD device: %d\n", ret); + nand_cleanup(nand); + return ret; + } + + list_add_tail(&meson_chip->node, &nfc->chips); + + return 0; +} + +static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc) +{ + struct meson_nfc_nand_chip *meson_chip; + struct mtd_info *mtd; + int ret; + + while (!list_empty(&nfc->chips)) { + meson_chip = list_first_entry(&nfc->chips, + struct meson_nfc_nand_chip, node); + mtd = nand_to_mtd(&meson_chip->nand); + ret = mtd_device_unregister(mtd); + if (ret) + return ret; + + meson_nfc_free_buffer(&meson_chip->nand); + nand_cleanup(&meson_chip->nand); + list_del(&meson_chip->node); + } + + return 0; +} + +static int meson_nfc_nand_chips_init(struct device *dev, + struct meson_nfc *nfc) +{ + struct device_node *np = dev->of_node; + struct device_node *nand_np; + int ret; + + for_each_child_of_node(np, nand_np) { + ret = meson_nfc_nand_chip_init(dev, nfc, nand_np); + if (ret) { + meson_nfc_nand_chip_cleanup(nfc); + return ret; + } + } + + return 0; +} + +static irqreturn_t meson_nfc_irq(int irq, void *id) +{ + struct meson_nfc *nfc = id; + u32 cfg; + + cfg = readl(nfc->reg_base + NFC_REG_CFG); + if (!(cfg & NFC_RB_IRQ_EN)) + return IRQ_NONE; + + cfg &= ~(NFC_RB_IRQ_EN); + writel(cfg, nfc->reg_base + NFC_REG_CFG); + + complete(&nfc->completion); + return IRQ_HANDLED; +} + +static const struct meson_nfc_data meson_gxl_data = { + .ecc_caps = &meson_gxl_ecc_caps, +}; + +static const struct meson_nfc_data meson_axg_data = { + .ecc_caps = &meson_axg_ecc_caps, +}; + +static const struct of_device_id meson_nfc_id_table[] = { + { + .compatible = "amlogic,meson-gxl-nfc", + .data = &meson_gxl_data, + }, { + .compatible = "amlogic,meson-axg-nfc", + .data = &meson_axg_data, + }, + {} +}; +MODULE_DEVICE_TABLE(of, meson_nfc_id_table); + +static int meson_nfc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct meson_nfc *nfc; + struct resource *res; + int ret, irq; + + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->data = of_device_get_match_data(&pdev->dev); + if (!nfc->data) + return -ENODEV; + + nand_controller_init(&nfc->controller); + INIT_LIST_HEAD(&nfc->chips); + + nfc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nfc->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->reg_base)) + return PTR_ERR(nfc->reg_base); + + nfc->reg_clk = + syscon_regmap_lookup_by_phandle(dev->of_node, + "amlogic,mmc-syscon"); + if (IS_ERR(nfc->reg_clk)) { + dev_err(dev, "Failed to lookup clock base\n"); + return PTR_ERR(nfc->reg_clk); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no NFC IRQ resource\n"); + return -EINVAL; + } + + ret = meson_nfc_clk_init(nfc); + if (ret) { + dev_err(dev, "failed to initialize NAND clock\n"); + return ret; + } + + writel(0, nfc->reg_base 
+ NFC_REG_CFG); + ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc); + if (ret) { + dev_err(dev, "failed to request NFC IRQ\n"); + ret = -EINVAL; + goto err_clk; + } + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "failed to set DMA mask\n"); + goto err_clk; + } + + platform_set_drvdata(pdev, nfc); + + ret = meson_nfc_nand_chips_init(dev, nfc); + if (ret) { + dev_err(dev, "failed to init NAND chips\n"); + goto err_clk; + } + + return 0; +err_clk: + meson_nfc_disable_clk(nfc); + return ret; +} + +static int meson_nfc_remove(struct platform_device *pdev) +{ + struct meson_nfc *nfc = platform_get_drvdata(pdev); + int ret; + + ret = meson_nfc_nand_chip_cleanup(nfc); + if (ret) + return ret; + + meson_nfc_disable_clk(nfc); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver meson_nfc_driver = { + .probe = meson_nfc_probe, + .remove = meson_nfc_remove, + .driver = { + .name = "meson-nand", + .of_match_table = meson_nfc_id_table, + }, +}; +module_platform_driver(meson_nfc_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Liang Yang <liang.yang@amlogic.com>"); +MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver"); diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 6432bd70c3b3..05b0c19d72d9 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -267,11 +267,15 @@ static struct mtk_ecc *mtk_ecc_get(struct device_node *np) struct mtk_ecc *ecc; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); - get_device(&pdev->dev); ecc = platform_get_drvdata(pdev); + if (!ecc) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + clk_prepare_enable(ecc->clk); mtk_ecc_hw_init(ecc); diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index b6b4602f5132..2c0e09187773 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1451,8 +1451,7 @@ static int mtk_nfc_probe(struct platform_device *pdev) if (!nfc) return -ENOMEM; - spin_lock_init(&nfc->controller.lock); - init_waitqueue_head(&nfc->controller.wq); + nand_controller_init(&nfc->controller); INIT_LIST_HEAD(&nfc->chips); nfc->controller.ops = &mtk_nfc_controller_ops; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 839494ac457c..ddd396e93e32 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -278,11 +278,8 @@ EXPORT_SYMBOL_GPL(nand_deselect_target); static void nand_release_device(struct nand_chip *chip) { /* Release the controller and the chip */ - spin_lock(&chip->controller->lock); - chip->controller->active = NULL; - chip->state = FL_READY; - wake_up(&chip->controller->wq); - spin_unlock(&chip->controller->lock); + mutex_unlock(&chip->controller->lock); + mutex_unlock(&chip->lock); } /** @@ -331,57 +328,23 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) } /** - * panic_nand_get_device - [GENERIC] Get chip for selected access - * @chip: the nand chip descriptor - * @new_state: the state which is requested - * - * Used when in panic, no locks are taken. 
- */ -static void panic_nand_get_device(struct nand_chip *chip, int new_state) -{ - /* Hardware controller shared among independent devices */ - chip->controller->active = chip; - chip->state = new_state; -} - -/** * nand_get_device - [GENERIC] Get chip for selected access * @chip: NAND chip structure - * @new_state: the state which is requested * - * Get the device and lock it for exclusive access + * Lock the device and its controller for exclusive access + * + * Return: -EBUSY if the chip has been suspended, 0 otherwise */ -static int -nand_get_device(struct nand_chip *chip, int new_state) +static int nand_get_device(struct nand_chip *chip) { - spinlock_t *lock = &chip->controller->lock; - wait_queue_head_t *wq = &chip->controller->wq; - DECLARE_WAITQUEUE(wait, current); -retry: - spin_lock(lock); - - /* Hardware controller shared among independent devices */ - if (!chip->controller->active) - chip->controller->active = chip; - - if (chip->controller->active == chip && chip->state == FL_READY) { - chip->state = new_state; - spin_unlock(lock); - return 0; - } - if (new_state == FL_PM_SUSPENDED) { - if (chip->controller->active->state == FL_PM_SUSPENDED) { - chip->state = FL_PM_SUSPENDED; - spin_unlock(lock); - return 0; - } + mutex_lock(&chip->lock); + if (chip->suspended) { + mutex_unlock(&chip->lock); + return -EBUSY; } - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(wq, &wait); - spin_unlock(lock); - schedule(); - remove_wait_queue(wq, &wait); - goto retry; + mutex_lock(&chip->controller->lock); + + return 0; } /** @@ -458,7 +421,7 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to, struct mtd_oob_ops *ops) { struct mtd_info *mtd = nand_to_mtd(chip); - int chipnr, page, status, len; + int chipnr, page, status, len, ret; pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, (int)ops->ooblen); @@ -480,7 +443,9 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to, * if we don't do this. I have no clue why, but I seem to have 'fixed' * it in the doc2000 driver in August 1999. dwmw2. */ - nand_reset(chip, chipnr); + ret = nand_reset(chip, chipnr); + if (ret) + return ret; nand_select_target(chip, chipnr); @@ -603,7 +568,10 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) nand_erase_nand(chip, &einfo, 0); /* Write bad block marker to OOB */ - nand_get_device(chip, FL_WRITING); + ret = nand_get_device(chip); + if (ret) + return ret; + ret = nand_markbad_bbm(chip, ofs); nand_release_device(chip); } @@ -3581,7 +3549,9 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, ops->mode != MTD_OPS_RAW) return -ENOTSUPP; - nand_get_device(chip, FL_READING); + ret = nand_get_device(chip); + if (ret) + return ret; if (!ops->datbuf) ret = nand_do_read_oob(chip, from, ops); @@ -4100,9 +4070,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, struct mtd_oob_ops ops; int ret; - /* Grab the device */ - panic_nand_get_device(chip, FL_WRITING); - nand_select_target(chip, chipnr); /* Wait for the device to get ready */ @@ -4133,7 +4100,9 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, ops->retlen = 0; - nand_get_device(chip, FL_WRITING); + ret = nand_get_device(chip); + if (ret) + return ret; switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -4156,23 +4125,6 @@ out: } /** - * single_erase - [GENERIC] NAND standard block erase command function - * @chip: NAND chip object - * @page: the page address of the block which will be erased - * - * Standard erase command for NAND chips. 
Returns NAND status. - */ -static int single_erase(struct nand_chip *chip, int page) -{ - unsigned int eraseblock; - - /* Send commands to erase a block */ - eraseblock = page >> (chip->phys_erase_shift - chip->page_shift); - - return nand_erase_op(chip, eraseblock); -} - -/** * nand_erase - [MTD Interface] erase block(s) * @mtd: MTD device structure * @instr: erase instruction @@ -4195,7 +4147,7 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, int allowbbt) { - int page, status, pages_per_block, ret, chipnr; + int page, pages_per_block, ret, chipnr; loff_t len; pr_debug("%s: start = 0x%012llx, len = %llu\n", @@ -4206,7 +4158,9 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, return -EINVAL; /* Grab the lock and see if the device is available */ - nand_get_device(chip, FL_ERASING); + ret = nand_get_device(chip); + if (ret) + return ret; /* Shift to get first page */ page = (int)(instr->addr >> chip->page_shift); @@ -4247,17 +4201,11 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, (page + pages_per_block)) chip->pagebuf = -1; - if (chip->legacy.erase) - status = chip->legacy.erase(chip, - page & chip->pagemask); - else - status = single_erase(chip, page & chip->pagemask); - - /* See if block erase succeeded */ - if (status) { + ret = nand_erase_op(chip, (page & chip->pagemask) >> + (chip->phys_erase_shift - chip->page_shift)); + if (ret) { pr_debug("%s: failed erase, page 0x%08x\n", __func__, page); - ret = -EIO; instr->fail_addr = ((loff_t)page << chip->page_shift); goto erase_exit; @@ -4299,7 +4247,7 @@ static void nand_sync(struct mtd_info *mtd) pr_debug("%s: called\n", __func__); /* Grab the lock and see if the device is available */ - nand_get_device(chip, FL_SYNCING); + WARN_ON(nand_get_device(chip)); /* Release it and go back */ nand_release_device(chip); } @@ -4316,7 +4264,10 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) int ret; /* Select the NAND device */ - nand_get_device(chip, FL_READING); + ret = nand_get_device(chip); + if (ret) + return ret; + nand_select_target(chip, chipnr); ret = nand_block_checkbad(chip, offs, 0); @@ -4389,7 +4340,13 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) */ static int nand_suspend(struct mtd_info *mtd) { - return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED); + struct nand_chip *chip = mtd_to_nand(mtd); + + mutex_lock(&chip->lock); + chip->suspended = 1; + mutex_unlock(&chip->lock); + + return 0; } /** @@ -4400,11 +4357,13 @@ static void nand_resume(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); - if (chip->state == FL_PM_SUSPENDED) - nand_release_device(chip); + mutex_lock(&chip->lock); + if (chip->suspended) + chip->suspended = 0; else pr_err("%s called for a chip which is not in suspended state\n", __func__); + mutex_unlock(&chip->lock); } /** @@ -4414,7 +4373,7 @@ static void nand_resume(struct mtd_info *mtd) */ static void nand_shutdown(struct mtd_info *mtd) { - nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED); + nand_suspend(mtd); } /* Set default functions */ @@ -5019,6 +4978,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, /* Assume all dies are deselected when we enter nand_scan_ident(). 
*/ chip->cur_cs = -1; + mutex_init(&chip->lock); + /* Enforce the right timings for reset/detection */ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0); @@ -5061,11 +5022,15 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, u8 id[2]; /* See comment in nand_get_flash_type for reset */ - nand_reset(chip, i); + ret = nand_reset(chip, i); + if (ret) + break; nand_select_target(chip, i); /* Send the command for reading device ID */ - nand_readid_op(chip, 0, id, sizeof(id)); + ret = nand_readid_op(chip, 0, id, sizeof(id)); + if (ret) + break; /* Read manufacturer and device IDs */ if (nand_maf_id != id[0] || nand_dev_id != id[1]) { nand_deselect_target(chip); @@ -5556,6 +5521,7 @@ static int nand_scan_tail(struct nand_chip *chip) } if (!ecc->read_page) ecc->read_page = nand_read_page_hwecc_oob_first; + /* fall through */ case NAND_ECC_HW: /* Use standard hwecc read page function? */ @@ -5575,6 +5541,7 @@ static int nand_scan_tail(struct nand_chip *chip) ecc->read_subpage = nand_read_subpage; if (!ecc->write_subpage && ecc->hwctl && ecc->calculate) ecc->write_subpage = nand_write_subpage_hwecc; + /* fall through */ case NAND_ECC_HW_SYNDROME: if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) && @@ -5612,6 +5579,7 @@ static int nand_scan_tail(struct nand_chip *chip) ecc->size, mtd->writesize); ecc->mode = NAND_ECC_SOFT; ecc->algo = NAND_ECC_HAMMING; + /* fall through */ case NAND_ECC_SOFT: ret = nand_set_ecc_soft_ops(chip); @@ -5718,9 +5686,6 @@ static int nand_scan_tail(struct nand_chip *chip) } chip->subpagesize = mtd->writesize >> mtd->subpage_sft; - /* Initialize state */ - chip->state = FL_READY; - /* Invalidate the pagebuffer reference */ chip->pagebuf = -1; diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 43575943f13b..f2526ec616a6 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -331,6 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command, */ if (column == -1 && page_addr == -1) return; + /* fall through */ default: /* @@ -483,7 +484,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command, chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - /* This applies to read commands */ + /* fall through - This applies to read commands */ default: /* * If we don't have access to the busy pin, we apply the given diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 68e8b9f7f372..8f280a2962c8 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -994,12 +994,9 @@ static int omap_wait(struct nand_chip *this) { struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this)); unsigned long timeo = jiffies; - int status, state = this->state; + int status; - if (state == FL_ERASING) - timeo += msecs_to_jiffies(400); - else - timeo += msecs_to_jiffies(20); + timeo += msecs_to_jiffies(400); writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command); while (time_before(jiffies, timeo)) { @@ -2173,11 +2170,8 @@ static const struct nand_controller_ops omap_nand_controller_ops = { }; /* Shared among all NAND instances to synchronize access to the ECC Engine */ -static struct nand_controller omap_gpmc_controller = { - .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock), - .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq), - .ops = &omap_nand_controller_ops, -}; +static struct nand_controller omap_gpmc_controller; +static bool omap_gpmc_controller_initialized; static 
int omap_nand_probe(struct platform_device *pdev) { @@ -2227,6 +2221,12 @@ static int omap_nand_probe(struct platform_device *pdev) info->phys_base = res->start; + if (!omap_gpmc_controller_initialized) { + omap_gpmc_controller.ops = &omap_nand_controller_ops; + nand_controller_init(&omap_gpmc_controller); + omap_gpmc_controller_initialized = true; + } + nand_chip->controller = &omap_gpmc_controller; nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R; diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index c01422d953dd..86456216fb93 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -369,8 +369,7 @@ static int r852_wait(struct nand_chip *chip) unsigned long timeout; u8 status; - timeout = jiffies + (chip->state == FL_ERASING ? - msecs_to_jiffies(400) : msecs_to_jiffies(20)); + timeout = jiffies + msecs_to_jiffies(400); while (time_before(jiffies, timeout)) if (chip->legacy.dev_ready(chip)) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c new file mode 100644 index 000000000000..999ca6a66036 --- /dev/null +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -0,0 +1,2073 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2018 + * Author: Christophe Kerello <christophe.kerello@st.com> + */ + +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/rawnand.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* Bad block marker length */ +#define FMC2_BBM_LEN 2 + +/* ECC step size */ +#define FMC2_ECC_STEP_SIZE 512 + +/* BCHDSRx registers length */ +#define FMC2_BCHDSRS_LEN 20 + +/* HECCR length */ +#define FMC2_HECCR_LEN 4 + +/* Max requests done for a 8k nand page size */ +#define FMC2_MAX_SG 16 + +/* Max chip enable */ +#define FMC2_MAX_CE 2 + +/* Max ECC buffer length */ +#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) + +/* Timings */ +#define FMC2_THIZ 1 +#define FMC2_TIO 8000 +#define FMC2_TSYNC 3000 +#define FMC2_PCR_TIMING_MASK 0xf +#define FMC2_PMEM_PATT_TIMING_MASK 0xff + +/* FMC2 Controller Registers */ +#define FMC2_BCR1 0x0 +#define FMC2_PCR 0x80 +#define FMC2_SR 0x84 +#define FMC2_PMEM 0x88 +#define FMC2_PATT 0x8c +#define FMC2_HECCR 0x94 +#define FMC2_CSQCR 0x200 +#define FMC2_CSQCFGR1 0x204 +#define FMC2_CSQCFGR2 0x208 +#define FMC2_CSQCFGR3 0x20c +#define FMC2_CSQAR1 0x210 +#define FMC2_CSQAR2 0x214 +#define FMC2_CSQIER 0x220 +#define FMC2_CSQISR 0x224 +#define FMC2_CSQICR 0x228 +#define FMC2_CSQEMSR 0x230 +#define FMC2_BCHIER 0x250 +#define FMC2_BCHISR 0x254 +#define FMC2_BCHICR 0x258 +#define FMC2_BCHPBR1 0x260 +#define FMC2_BCHPBR2 0x264 +#define FMC2_BCHPBR3 0x268 +#define FMC2_BCHPBR4 0x26c +#define FMC2_BCHDSR0 0x27c +#define FMC2_BCHDSR1 0x280 +#define FMC2_BCHDSR2 0x284 +#define FMC2_BCHDSR3 0x288 +#define FMC2_BCHDSR4 0x28c + +/* Register: FMC2_BCR1 */ +#define FMC2_BCR1_FMC2EN BIT(31) + +/* Register: FMC2_PCR */ +#define FMC2_PCR_PWAITEN BIT(1) +#define FMC2_PCR_PBKEN BIT(2) +#define FMC2_PCR_PWID_MASK GENMASK(5, 4) +#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4) +#define FMC2_PCR_PWID_BUSWIDTH_8 0 +#define FMC2_PCR_PWID_BUSWIDTH_16 1 +#define FMC2_PCR_ECCEN BIT(6) +#define FMC2_PCR_ECCALG BIT(8) +#define FMC2_PCR_TCLR_MASK GENMASK(12, 9) +#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9) +#define FMC2_PCR_TCLR_DEFAULT 0xf +#define 
FMC2_PCR_TAR_MASK GENMASK(16, 13) +#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13) +#define FMC2_PCR_TAR_DEFAULT 0xf +#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17) +#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17) +#define FMC2_PCR_ECCSS_512 1 +#define FMC2_PCR_ECCSS_2048 3 +#define FMC2_PCR_BCHECC BIT(24) +#define FMC2_PCR_WEN BIT(25) + +/* Register: FMC2_SR */ +#define FMC2_SR_NWRF BIT(6) + +/* Register: FMC2_PMEM */ +#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0) +#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8) +#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16) +#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PMEM_DEFAULT 0x0a0a0a0a + +/* Register: FMC2_PATT */ +#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0) +#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8) +#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16) +#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PATT_DEFAULT 0x0a0a0a0a + +/* Register: FMC2_CSQCR */ +#define FMC2_CSQCR_CSQSTART BIT(0) + +/* Register: FMC2_CSQCFGR1 */ +#define FMC2_CSQCFGR1_CMD2EN BIT(1) +#define FMC2_CSQCFGR1_DMADEN BIT(2) +#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4) +#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8) +#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR1_CMD1T BIT(24) +#define FMC2_CSQCFGR1_CMD2T BIT(25) + +/* Register: FMC2_CSQCFGR2 */ +#define FMC2_CSQCFGR2_SQSDTEN BIT(0) +#define FMC2_CSQCFGR2_RCMD2EN BIT(1) +#define FMC2_CSQCFGR2_DMASEN BIT(2) +#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8) +#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR2_RCMD1T BIT(24) +#define FMC2_CSQCFGR2_RCMD2T BIT(25) + +/* Register: FMC2_CSQCFGR3 */ +#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8) +#define FMC2_CSQCFGR3_AC1T BIT(16) +#define FMC2_CSQCFGR3_AC2T BIT(17) +#define FMC2_CSQCFGR3_AC3T BIT(18) +#define FMC2_CSQCFGR3_AC4T BIT(19) +#define FMC2_CSQCFGR3_AC5T BIT(20) +#define FMC2_CSQCFGR3_SDT BIT(21) +#define FMC2_CSQCFGR3_RAC1T BIT(22) +#define FMC2_CSQCFGR3_RAC2T BIT(23) + +/* Register: FMC2_CSQCAR1 */ +#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0) +#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8) +#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16) +#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24) + +/* Register: FMC2_CSQCAR2 */ +#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0) +#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10) +#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16) + +/* Register: FMC2_CSQIER */ +#define FMC2_CSQIER_TCIE BIT(0) + +/* Register: FMC2_CSQICR */ +#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0) + +/* Register: FMC2_CSQEMSR */ +#define FMC2_CSQEMSR_SEM GENMASK(15, 0) + +/* Register: FMC2_BCHIER */ +#define FMC2_BCHIER_DERIE BIT(1) +#define FMC2_BCHIER_EPBRIE BIT(4) + +/* Register: FMC2_BCHICR */ +#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0) + +/* Register: FMC2_BCHDSR0 */ +#define FMC2_BCHDSR0_DUE BIT(0) +#define FMC2_BCHDSR0_DEF BIT(1) +#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4) +#define FMC2_BCHDSR0_DEN_SHIFT 4 + +/* Register: FMC2_BCHDSR1 */ +#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0) +#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16) +#define FMC2_BCHDSR1_EBP2_SHIFT 16 + +/* Register: FMC2_BCHDSR2 */ +#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0) +#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16) +#define FMC2_BCHDSR2_EBP4_SHIFT 16 + +/* Register: FMC2_BCHDSR3 */ +#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0) +#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16) +#define FMC2_BCHDSR3_EBP6_SHIFT 16 + +/* Register: FMC2_BCHDSR4 */ +#define 
FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0) +#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16) +#define FMC2_BCHDSR4_EBP8_SHIFT 16 + +enum stm32_fmc2_ecc { + FMC2_ECC_HAM = 1, + FMC2_ECC_BCH4 = 4, + FMC2_ECC_BCH8 = 8 +}; + +enum stm32_fmc2_irq_state { + FMC2_IRQ_UNKNOWN = 0, + FMC2_IRQ_BCH, + FMC2_IRQ_SEQ +}; + +struct stm32_fmc2_timings { + u8 tclr; + u8 tar; + u8 thiz; + u8 twait; + u8 thold_mem; + u8 tset_mem; + u8 thold_att; + u8 tset_att; +}; + +struct stm32_fmc2_nand { + struct nand_chip chip; + struct stm32_fmc2_timings timings; + int ncs; + int cs_used[FMC2_MAX_CE]; +}; + +static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip) +{ + return container_of(chip, struct stm32_fmc2_nand, chip); +} + +struct stm32_fmc2_nfc { + struct nand_controller base; + struct stm32_fmc2_nand nand; + struct device *dev; + void __iomem *io_base; + void __iomem *data_base[FMC2_MAX_CE]; + void __iomem *cmd_base[FMC2_MAX_CE]; + void __iomem *addr_base[FMC2_MAX_CE]; + phys_addr_t io_phys_addr; + phys_addr_t data_phys_addr[FMC2_MAX_CE]; + struct clk *clk; + u8 irq_state; + + struct dma_chan *dma_tx_ch; + struct dma_chan *dma_rx_ch; + struct dma_chan *dma_ecc_ch; + struct sg_table dma_data_sg; + struct sg_table dma_ecc_sg; + u8 *ecc_buf; + int dma_ecc_len; + + struct completion complete; + struct completion dma_data_complete; + struct completion dma_ecc_complete; + + u8 cs_assigned; + int cs_sel; +}; + +static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base) +{ + return container_of(base, struct stm32_fmc2_nfc, base); +} + +/* Timings configuration */ +static void stm32_fmc2_timings_init(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); + struct stm32_fmc2_timings *timings = &nand->timings; + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pmem, patt; + + /* Set tclr/tar timings */ + pcr &= ~FMC2_PCR_TCLR_MASK; + pcr |= FMC2_PCR_TCLR(timings->tclr); + pcr &= ~FMC2_PCR_TAR_MASK; + pcr |= FMC2_PCR_TAR(timings->tar); + + /* Set tset/twait/thold/thiz timings in common bank */ + pmem = FMC2_PMEM_MEMSET(timings->tset_mem); + pmem |= FMC2_PMEM_MEMWAIT(timings->twait); + pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem); + pmem |= FMC2_PMEM_MEMHIZ(timings->thiz); + + /* Set tset/twait/thold/thiz timings in attribut bank */ + patt = FMC2_PATT_ATTSET(timings->tset_att); + patt |= FMC2_PATT_ATTWAIT(timings->twait); + patt |= FMC2_PATT_ATTHOLD(timings->thold_att); + patt |= FMC2_PATT_ATTHIZ(timings->thiz); + + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM); + writel_relaxed(patt, fmc2->io_base + FMC2_PATT); +} + +/* Controller configuration */ +static void stm32_fmc2_setup(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + /* Configure ECC algorithm (default configuration is Hamming) */ + pcr &= ~FMC2_PCR_ECCALG; + pcr &= ~FMC2_PCR_BCHECC; + if (chip->ecc.strength == FMC2_ECC_BCH8) { + pcr |= FMC2_PCR_ECCALG; + pcr |= FMC2_PCR_BCHECC; + } else if (chip->ecc.strength == FMC2_ECC_BCH4) { + pcr |= FMC2_PCR_ECCALG; + } + + /* Set buswidth */ + pcr &= ~FMC2_PCR_PWID_MASK; + if (chip->options & NAND_BUSWIDTH_16) + pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + + /* Set ECC sector size */ + pcr &= ~FMC2_PCR_ECCSS_MASK; + pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512); + + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); +} + +/* Select target */ +static 
int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); + struct dma_slave_config dma_cfg; + int ret; + + if (nand->cs_used[chipnr] == fmc2->cs_sel) + return 0; + + fmc2->cs_sel = nand->cs_used[chipnr]; + + /* FMC2 setup routine */ + stm32_fmc2_setup(chip); + + /* Apply timings */ + stm32_fmc2_timings_init(chip); + + if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) { + memset(&dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel]; + dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel]; + dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_cfg.src_maxburst = 32; + dma_cfg.dst_maxburst = 32; + + ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg); + if (ret) { + dev_err(fmc2->dev, "tx DMA engine slave config failed\n"); + return ret; + } + + ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg); + if (ret) { + dev_err(fmc2->dev, "rx DMA engine slave config failed\n"); + return ret; + } + } + + if (fmc2->dma_ecc_ch) { + /* + * Hamming: we read HECCR register + * BCH4/BCH8: we read BCHDSRSx registers + */ + memset(&dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg.src_addr = fmc2->io_phys_addr; + dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ? + FMC2_HECCR : FMC2_BCHDSR0; + dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + + ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg); + if (ret) { + dev_err(fmc2->dev, "ECC DMA engine slave config failed\n"); + return ret; + } + + /* Calculate ECC length needed for one sector */ + fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ? + FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN; + } + + return 0; +} + +/* Set bus width to 16-bit or 8-bit */ +static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set) +{ + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + pcr &= ~FMC2_PCR_PWID_MASK; + if (set) + pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); +} + +/* Enable/disable ECC */ +static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable) +{ + u32 pcr = readl(fmc2->io_base + FMC2_PCR); + + pcr &= ~FMC2_PCR_ECCEN; + if (enable) + pcr |= FMC2_PCR_ECCEN; + writel(pcr, fmc2->io_base + FMC2_PCR); +} + +/* Enable irq sources in case of the sequencer is used */ +static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2) +{ + u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + + csqier |= FMC2_CSQIER_TCIE; + + fmc2->irq_state = FMC2_IRQ_SEQ; + + writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); +} + +/* Disable irq sources in case of the sequencer is used */ +static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2) +{ + u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + + csqier &= ~FMC2_CSQIER_TCIE; + + writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); + + fmc2->irq_state = FMC2_IRQ_UNKNOWN; +} + +/* Clear irq sources in case of the sequencer is used */ +static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2) +{ + writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR); +} + +/* Enable irq sources in case of bch is used */ +static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2, + int mode) +{ + u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + + if (mode == NAND_ECC_WRITE) + bchier |= FMC2_BCHIER_EPBRIE; + else + bchier |= FMC2_BCHIER_DERIE; + + 
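/*
 * Descriptive note (editorial, not part of the patch): the completion armed
 * here is consumed differently depending on the direction. On a write,
 * stm32_fmc2_bch_calculate() waits for it and then reads the FMC2_BCHPBRx
 * parity registers; on a read, stm32_fmc2_bch_correct() waits for it and then
 * reads the FMC2_BCHDSRx status registers. The irq_state flag set just below
 * tells the shared handler stm32_fmc2_irq() which block (BCH vs sequencer)
 * must be masked before the completion is signalled.
 */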
fmc2->irq_state = FMC2_IRQ_BCH; + + writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); +} + +/* Disable irq sources in case of bch is used */ +static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2) +{ + u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + + bchier &= ~FMC2_BCHIER_DERIE; + bchier &= ~FMC2_BCHIER_EPBRIE; + + writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); + + fmc2->irq_state = FMC2_IRQ_UNKNOWN; +} + +/* Clear irq sources in case of bch is used */ +static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2) +{ + writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR); +} + +/* + * Enable ECC logic and reset syndrome/parity bits previously calculated + * Syndrome/parity bits is cleared by setting the ECCEN bit to 0 + */ +static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + + stm32_fmc2_set_ecc(fmc2, false); + + if (chip->ecc.strength != FMC2_ECC_HAM) { + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + if (mode == NAND_ECC_WRITE) + pcr |= FMC2_PCR_WEN; + else + pcr &= ~FMC2_PCR_WEN; + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + + reinit_completion(&fmc2->complete); + stm32_fmc2_clear_bch_irq(fmc2); + stm32_fmc2_enable_bch_irq(fmc2, mode); + } + + stm32_fmc2_set_ecc(fmc2, true); +} + +/* + * ECC Hamming calculation + * ECC is 3 bytes for 512 bytes of data (supports error correction up to + * max of 1-bit) + */ +static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc) +{ + ecc[0] = ecc_sta; + ecc[1] = ecc_sta >> 8; + ecc[2] = ecc_sta >> 16; +} + +static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 sr, heccr; + int ret; + + ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, + sr, sr & FMC2_SR_NWRF, 10, 1000); + if (ret) { + dev_err(fmc2->dev, "ham timeout\n"); + return ret; + } + + heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR); + + stm32_fmc2_ham_set_ecc(heccr, ecc); + + /* Disable ECC */ + stm32_fmc2_set_ecc(fmc2, false); + + return 0; +} + +static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) +{ + u8 bit_position = 0, b0, b1, b2; + u32 byte_addr = 0, b; + u32 i, shifting = 1; + + /* Indicate which bit and byte is faulty (if any) */ + b0 = read_ecc[0] ^ calc_ecc[0]; + b1 = read_ecc[1] ^ calc_ecc[1]; + b2 = read_ecc[2] ^ calc_ecc[2]; + b = b0 | (b1 << 8) | (b2 << 16); + + /* No errors */ + if (likely(!b)) + return 0; + + /* Calculate bit position */ + for (i = 0; i < 3; i++) { + switch (b % 4) { + case 2: + bit_position += shifting; + case 1: + break; + default: + return -EBADMSG; + } + shifting <<= 1; + b >>= 2; + } + + /* Calculate byte position */ + shifting = 1; + for (i = 0; i < 9; i++) { + switch (b % 4) { + case 2: + byte_addr += shifting; + case 1: + break; + default: + return -EBADMSG; + } + shifting <<= 1; + b >>= 2; + } + + /* Flip the bit */ + dat[byte_addr] ^= (1 << bit_position); + + return 1; +} + +/* + * ECC BCH calculation and correction + * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to + * max of 4-bit/8-bit) + */ +static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 bchpbr; + + /* Wait until the BCH code is ready */ + if (!wait_for_completion_timeout(&fmc2->complete, + msecs_to_jiffies(1000))) { + 
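/*
 * Descriptive note (editorial): the completion is only ever signalled from
 * stm32_fmc2_irq(), so reaching this timeout means the BCH interrupt never
 * fired; the interrupt sources are masked again before returning so a late
 * interrupt cannot complete a stale request.
 */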
dev_err(fmc2->dev, "bch timeout\n"); + stm32_fmc2_disable_bch_irq(fmc2); + return -ETIMEDOUT; + } + + /* Read parity bits */ + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1); + ecc[0] = bchpbr; + ecc[1] = bchpbr >> 8; + ecc[2] = bchpbr >> 16; + ecc[3] = bchpbr >> 24; + + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2); + ecc[4] = bchpbr; + ecc[5] = bchpbr >> 8; + ecc[6] = bchpbr >> 16; + + if (chip->ecc.strength == FMC2_ECC_BCH8) { + ecc[7] = bchpbr >> 24; + + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3); + ecc[8] = bchpbr; + ecc[9] = bchpbr >> 8; + ecc[10] = bchpbr >> 16; + ecc[11] = bchpbr >> 24; + + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4); + ecc[12] = bchpbr; + } + + /* Disable ECC */ + stm32_fmc2_set_ecc(fmc2, false); + + return 0; +} + +/* BCH algorithm correction */ +static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) +{ + u32 bchdsr0 = ecc_sta[0]; + u32 bchdsr1 = ecc_sta[1]; + u32 bchdsr2 = ecc_sta[2]; + u32 bchdsr3 = ecc_sta[3]; + u32 bchdsr4 = ecc_sta[4]; + u16 pos[8]; + int i, den; + unsigned int nb_errs = 0; + + /* No errors found */ + if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF))) + return 0; + + /* Too many errors detected */ + if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE)) + return -EBADMSG; + + pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK; + pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT; + pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK; + pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT; + pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK; + pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT; + pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK; + pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT; + + den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT; + for (i = 0; i < den; i++) { + if (pos[i] < eccsize * 8) { + change_bit(pos[i], (unsigned long *)dat); + nb_errs++; + } + } + + return nb_errs; +} + +static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 ecc_sta[5]; + + /* Wait until the decoding error is ready */ + if (!wait_for_completion_timeout(&fmc2->complete, + msecs_to_jiffies(1000))) { + dev_err(fmc2->dev, "bch timeout\n"); + stm32_fmc2_disable_bch_irq(fmc2); + return -ETIMEDOUT; + } + + ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0); + ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1); + ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2); + ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3); + ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4); + + /* Disable ECC */ + stm32_fmc2_set_ecc(fmc2, false); + + return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta); +} + +static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret, i, s, stat, eccsize = chip->ecc.size; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + int eccstrength = chip->ecc.strength; + u8 *p = buf; + u8 *ecc_calc = chip->ecc.calc_buf; + u8 *ecc_code = chip->ecc.code_buf; + unsigned int max_bitflips = 0; + + ret = nand_read_page_op(chip, page, 0, NULL, 0); + if (ret) + return ret; + + for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps; + s++, i += eccbytes, p += eccsize) { + chip->ecc.hwctl(chip, NAND_ECC_READ); + + /* Read the nand page sector (512 bytes) */ + ret = nand_change_read_column_op(chip, s * eccsize, p, + 
eccsize, false); + if (ret) + return ret; + + /* Read the corresponding ECC bytes */ + ret = nand_change_read_column_op(chip, i, ecc_code, + eccbytes, false); + if (ret) + return ret; + + /* Correct the data */ + stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc); + if (stat == -EBADMSG) + /* Check for empty pages with bitflips */ + stat = nand_check_erased_ecc_chunk(p, eccsize, + ecc_code, eccbytes, + NULL, 0, + eccstrength); + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + /* Read oob */ + if (oob_required) { + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, + false); + if (ret) + return ret; + } + + return max_bitflips; +} + +/* Sequencer read/write configuration */ +static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, + int raw, bool write_data) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + u32 csqcfgr1, csqcfgr2, csqcfgr3; + u32 csqar1, csqar2; + u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN; + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + if (write_data) + pcr |= FMC2_PCR_WEN; + else + pcr &= ~FMC2_PCR_WEN; + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + + /* + * - Set Program Page/Page Read command + * - Enable DMA request data + * - Set timings + */ + csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T; + if (write_data) + csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN); + else + csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) | + FMC2_CSQCFGR1_CMD2EN | + FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) | + FMC2_CSQCFGR1_CMD2T; + + /* + * - Set Random Data Input/Random Data Read command + * - Enable the sequencer to access the Spare data area + * - Enable DMA request status decoding for read + * - Set timings + */ + if (write_data) + csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN); + else + csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) | + FMC2_CSQCFGR2_RCMD2EN | + FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) | + FMC2_CSQCFGR2_RCMD1T | + FMC2_CSQCFGR2_RCMD2T; + if (!raw) { + csqcfgr2 |= write_data ? 
0 : FMC2_CSQCFGR2_DMASEN; + csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN; + } + + /* + * - Set the number of sectors to be written + * - Set timings + */ + csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1); + if (write_data) { + csqcfgr3 |= FMC2_CSQCFGR3_RAC2T; + if (chip->options & NAND_ROW_ADDR_3) + csqcfgr3 |= FMC2_CSQCFGR3_AC5T; + else + csqcfgr3 |= FMC2_CSQCFGR3_AC4T; + } + + /* + * Set the fourth first address cycles + * Byte 1 and byte 2 => column, we start at 0x0 + * Byte 3 and byte 4 => page + */ + csqar1 = FMC2_CSQCAR1_ADDC3(page); + csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8); + + /* + * - Set chip enable number + * - Set ECC byte offset in the spare area + * - Calculate the number of address cycles to be issued + * - Set byte 5 of address cycle if needed + */ + csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel); + if (chip->options & NAND_BUSWIDTH_16) + csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1); + else + csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset); + if (chip->options & NAND_ROW_ADDR_3) { + csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5); + csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16); + } else { + csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4); + } + + writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1); + writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2); + writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3); + writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1); + writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2); +} + +static void stm32_fmc2_dma_callback(void *arg) +{ + complete((struct completion *)arg); +} + +/* Read/write data from/to a page */ +static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, + int raw, bool write_data) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct dma_async_tx_descriptor *desc_data, *desc_ecc; + struct scatterlist *sg; + struct dma_chan *dma_ch = fmc2->dma_rx_ch; + enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE; + enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM; + u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR); + int eccsteps = chip->ecc.steps; + int eccsize = chip->ecc.size; + const u8 *p = buf; + int s, ret; + + /* Configure DMA data */ + if (write_data) { + dma_data_dir = DMA_TO_DEVICE; + dma_transfer_dir = DMA_MEM_TO_DEV; + dma_ch = fmc2->dma_tx_ch; + } + + for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) { + sg_set_buf(sg, p, eccsize); + p += eccsize; + } + + ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl, + eccsteps, dma_data_dir); + if (ret < 0) + return ret; + + desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl, + eccsteps, dma_transfer_dir, + DMA_PREP_INTERRUPT); + if (!desc_data) { + ret = -ENOMEM; + goto err_unmap_data; + } + + reinit_completion(&fmc2->dma_data_complete); + reinit_completion(&fmc2->complete); + desc_data->callback = stm32_fmc2_dma_callback; + desc_data->callback_param = &fmc2->dma_data_complete; + ret = dma_submit_error(dmaengine_submit(desc_data)); + if (ret) + goto err_unmap_data; + + dma_async_issue_pending(dma_ch); + + if (!write_data && !raw) { + /* Configure DMA ECC status */ + p = fmc2->ecc_buf; + for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) { + sg_set_buf(sg, p, fmc2->dma_ecc_len); + p += fmc2->dma_ecc_len; + } + + ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + eccsteps, dma_data_dir); + if (ret < 0) + goto err_unmap_data; + + desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch, + fmc2->dma_ecc_sg.sgl, + eccsteps, dma_transfer_dir, + DMA_PREP_INTERRUPT); + if (!desc_ecc) { + ret = -ENOMEM; + goto err_unmap_ecc; + } + + 
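/*
 * Illustrative sizing, derived from the constants defined at the top of this
 * file (editorial, not new behaviour): with 512-byte ECC steps an 8 KiB page
 * yields at most FMC2_MAX_SG = 16 sectors. Each sector produces
 * FMC2_BCHDSRS_LEN = 20 bytes of status in BCH mode (BCHDSR0..4) or
 * FMC2_HECCR_LEN = 4 bytes in Hamming mode, so ecc_buf never needs more than
 * FMC2_MAX_ECC_BUF_LEN = 20 * 16 = 320 bytes. dma_ecc_len was chosen per
 * sector in stm32_fmc2_select_chip(), and the DMA below streams the status
 * words into ecc_buf so stm32_fmc2_sequencer_correct() can post-process them
 * without further register reads.
 */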
reinit_completion(&fmc2->dma_ecc_complete); + desc_ecc->callback = stm32_fmc2_dma_callback; + desc_ecc->callback_param = &fmc2->dma_ecc_complete; + ret = dma_submit_error(dmaengine_submit(desc_ecc)); + if (ret) + goto err_unmap_ecc; + + dma_async_issue_pending(fmc2->dma_ecc_ch); + } + + stm32_fmc2_clear_seq_irq(fmc2); + stm32_fmc2_enable_seq_irq(fmc2); + + /* Start the transfer */ + csqcr |= FMC2_CSQCR_CSQSTART; + writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR); + + /* Wait end of sequencer transfer */ + if (!wait_for_completion_timeout(&fmc2->complete, + msecs_to_jiffies(1000))) { + dev_err(fmc2->dev, "seq timeout\n"); + stm32_fmc2_disable_seq_irq(fmc2); + dmaengine_terminate_all(dma_ch); + if (!write_data && !raw) + dmaengine_terminate_all(fmc2->dma_ecc_ch); + ret = -ETIMEDOUT; + goto err_unmap_ecc; + } + + /* Wait DMA data transfer completion */ + if (!wait_for_completion_timeout(&fmc2->dma_data_complete, + msecs_to_jiffies(100))) { + dev_err(fmc2->dev, "data DMA timeout\n"); + dmaengine_terminate_all(dma_ch); + ret = -ETIMEDOUT; + } + + /* Wait DMA ECC transfer completion */ + if (!write_data && !raw) { + if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete, + msecs_to_jiffies(100))) { + dev_err(fmc2->dev, "ECC DMA timeout\n"); + dmaengine_terminate_all(fmc2->dma_ecc_ch); + ret = -ETIMEDOUT; + } + } + +err_unmap_ecc: + if (!write_data && !raw) + dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + eccsteps, dma_data_dir); + +err_unmap_data: + dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir); + + return ret; +} + +static int stm32_fmc2_sequencer_write(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page, int raw) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* Configure the sequencer */ + stm32_fmc2_rw_page_init(chip, page, raw, true); + + /* Write the page */ + ret = stm32_fmc2_xfer(chip, buf, raw, true); + if (ret) + return ret; + + /* Write oob */ + if (oob_required) { + ret = nand_change_write_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, + false); + if (ret) + return ret; + } + + return nand_prog_page_end_op(chip); +} + +static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip, + const u8 *buf, + int oob_required, + int page) +{ + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false); +} + +static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip, + const u8 *buf, + int oob_required, + int page) +{ + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true); +} + +/* Get a status indicating which sectors have errors */ +static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2) +{ + u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR); + + return csqemsr & FMC2_CSQEMSR_SEM; +} + +static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + int eccstrength = chip->ecc.strength; + int i, s, eccsize = chip->ecc.size; + u32 *ecc_sta = (u32 *)fmc2->ecc_buf; + u16 sta_map = stm32_fmc2_get_mapping_status(fmc2); + unsigned int max_bitflips = 0; + + for (i = 0, s = 0; s < eccsteps; s++, i += 
eccbytes, dat += eccsize) { + int stat = 0; + + if (eccstrength == FMC2_ECC_HAM) { + /* Ecc_sta = FMC2_HECCR */ + if (sta_map & BIT(s)) { + stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]); + stat = stm32_fmc2_ham_correct(chip, dat, + &read_ecc[i], + &calc_ecc[i]); + } + ecc_sta++; + } else { + /* + * Ecc_sta[0] = FMC2_BCHDSR0 + * Ecc_sta[1] = FMC2_BCHDSR1 + * Ecc_sta[2] = FMC2_BCHDSR2 + * Ecc_sta[3] = FMC2_BCHDSR3 + * Ecc_sta[4] = FMC2_BCHDSR4 + */ + if (sta_map & BIT(s)) + stat = stm32_fmc2_bch_decode(eccsize, dat, + ecc_sta); + ecc_sta += 5; + } + + if (stat == -EBADMSG) + /* Check for empty pages with bitflips */ + stat = nand_check_erased_ecc_chunk(dat, eccsize, + &read_ecc[i], + eccbytes, + NULL, 0, + eccstrength); + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + return max_bitflips; +} + +static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u8 *ecc_calc = chip->ecc.calc_buf; + u8 *ecc_code = chip->ecc.code_buf; + u16 sta_map; + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + /* Configure the sequencer */ + stm32_fmc2_rw_page_init(chip, page, 0, false); + + /* Read the page */ + ret = stm32_fmc2_xfer(chip, buf, 0, false); + if (ret) + return ret; + + sta_map = stm32_fmc2_get_mapping_status(fmc2); + + /* Check if errors happen */ + if (likely(!sta_map)) { + if (oob_required) + return nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, + mtd->oobsize, false); + + return 0; + } + + /* Read oob */ + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, false); + if (ret) + return ret; + + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + /* Correct data */ + return chip->ecc.correct(chip, buf, ecc_code, ecc_calc); +} + +static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + /* Configure the sequencer */ + stm32_fmc2_rw_page_init(chip, page, 1, false); + + /* Read the page */ + ret = stm32_fmc2_xfer(chip, buf, 1, false); + if (ret) + return ret; + + /* Read oob */ + if (oob_required) + return nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, + false); + + return 0; +} + +static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id) +{ + struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id; + + if (fmc2->irq_state == FMC2_IRQ_SEQ) + /* Sequencer is used */ + stm32_fmc2_disable_seq_irq(fmc2); + else if (fmc2->irq_state == FMC2_IRQ_BCH) + /* BCH is used */ + stm32_fmc2_disable_bch_irq(fmc2); + + complete(&fmc2->complete); + + return IRQ_HANDLED; +} + +static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf, + unsigned int len, bool force_8bit) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel]; + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 8-bit */ + stm32_fmc2_set_buswidth_16(fmc2, false); + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { + if 
(!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { + *(u8 *)buf = readb_relaxed(io_addr_r); + buf += sizeof(u8); + len -= sizeof(u8); + } + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) && + len >= sizeof(u16)) { + *(u16 *)buf = readw_relaxed(io_addr_r); + buf += sizeof(u16); + len -= sizeof(u16); + } + } + + /* Buf is aligned */ + while (len >= sizeof(u32)) { + *(u32 *)buf = readl_relaxed(io_addr_r); + buf += sizeof(u32); + len -= sizeof(u32); + } + + /* Read remaining bytes */ + if (len >= sizeof(u16)) { + *(u16 *)buf = readw_relaxed(io_addr_r); + buf += sizeof(u16); + len -= sizeof(u16); + } + + if (len) + *(u8 *)buf = readb_relaxed(io_addr_r); + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 16-bit */ + stm32_fmc2_set_buswidth_16(fmc2, true); +} + +static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, + unsigned int len, bool force_8bit) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel]; + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 8-bit */ + stm32_fmc2_set_buswidth_16(fmc2, false); + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { + writeb_relaxed(*(u8 *)buf, io_addr_w); + buf += sizeof(u8); + len -= sizeof(u8); + } + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) && + len >= sizeof(u16)) { + writew_relaxed(*(u16 *)buf, io_addr_w); + buf += sizeof(u16); + len -= sizeof(u16); + } + } + + /* Buf is aligned */ + while (len >= sizeof(u32)) { + writel_relaxed(*(u32 *)buf, io_addr_w); + buf += sizeof(u32); + len -= sizeof(u32); + } + + /* Write remaining bytes */ + if (len >= sizeof(u16)) { + writew_relaxed(*(u16 *)buf, io_addr_w); + buf += sizeof(u16); + len -= sizeof(u16); + } + + if (len) + writeb_relaxed(*(u8 *)buf, io_addr_w); + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 16-bit */ + stm32_fmc2_set_buswidth_16(fmc2, true); +} + +static int stm32_fmc2_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_op_instr *instr = NULL; + unsigned int op_id, i; + int ret; + + ret = stm32_fmc2_select_chip(chip, op->cs); + if (ret) + return ret; + + if (check_only) + return ret; + + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb_relaxed(instr->ctx.cmd.opcode, + fmc2->cmd_base[fmc2->cs_sel]); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + writeb_relaxed(instr->ctx.addr.addrs[i], + fmc2->addr_base[fmc2->cs_sel]); + break; + + case NAND_OP_DATA_IN_INSTR: + stm32_fmc2_read_data(chip, instr->ctx.data.buf.in, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_DATA_OUT_INSTR: + stm32_fmc2_write_data(chip, instr->ctx.data.buf.out, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = nand_soft_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); + break; + } + } + + return ret; +} + +/* Controller initialization */ +static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) +{ + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1); + + /* Set CS used to undefined */ + fmc2->cs_sel = -1; + + /* Enable wait feature and nand flash memory bank */ + pcr |= 
FMC2_PCR_PWAITEN; + pcr |= FMC2_PCR_PBKEN; + + /* Set buswidth to 8 bits mode for identification */ + pcr &= ~FMC2_PCR_PWID_MASK; + + /* ECC logic is disabled */ + pcr &= ~FMC2_PCR_ECCEN; + + /* Default mode */ + pcr &= ~FMC2_PCR_ECCALG; + pcr &= ~FMC2_PCR_BCHECC; + pcr &= ~FMC2_PCR_WEN; + + /* Set default ECC sector size */ + pcr &= ~FMC2_PCR_ECCSS_MASK; + pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048); + + /* Set default tclr/tar timings */ + pcr &= ~FMC2_PCR_TCLR_MASK; + pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT); + pcr &= ~FMC2_PCR_TAR_MASK; + pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT); + + /* Enable FMC2 controller */ + bcr1 |= FMC2_BCR1_FMC2EN; + + writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1); + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM); + writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT); +} + +/* Controller timings */ +static void stm32_fmc2_calc_timings(struct nand_chip *chip, + const struct nand_sdr_timings *sdrt) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); + struct stm32_fmc2_timings *tims = &nand->timings; + unsigned long hclk = clk_get_rate(fmc2->clk); + unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); + int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att; + + tar = hclkp; + if (tar < sdrt->tAR_min) + tar = sdrt->tAR_min; + tims->tar = DIV_ROUND_UP(tar, hclkp) - 1; + if (tims->tar > FMC2_PCR_TIMING_MASK) + tims->tar = FMC2_PCR_TIMING_MASK; + + tclr = hclkp; + if (tclr < sdrt->tCLR_min) + tclr = sdrt->tCLR_min; + tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1; + if (tims->tclr > FMC2_PCR_TIMING_MASK) + tims->tclr = FMC2_PCR_TIMING_MASK; + + tims->thiz = FMC2_THIZ; + thiz = (tims->thiz + 1) * hclkp; + + /* + * tWAIT > tRP + * tWAIT > tWP + * tWAIT > tREA + tIO + */ + twait = hclkp; + if (twait < sdrt->tRP_min) + twait = sdrt->tRP_min; + if (twait < sdrt->tWP_min) + twait = sdrt->tWP_min; + if (twait < sdrt->tREA_max + FMC2_TIO) + twait = sdrt->tREA_max + FMC2_TIO; + tims->twait = DIV_ROUND_UP(twait, hclkp); + if (tims->twait == 0) + tims->twait = 1; + else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK) + tims->twait = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tSETUP_MEM > tCS - tWAIT + * tSETUP_MEM > tALS - tWAIT + * tSETUP_MEM > tDS - (tWAIT - tHIZ) + */ + tset_mem = hclkp; + if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait)) + tset_mem = sdrt->tCS_min - twait; + if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait)) + tset_mem = sdrt->tALS_min - twait; + if (twait > thiz && (sdrt->tDS_min > twait - thiz) && + (tset_mem < sdrt->tDS_min - (twait - thiz))) + tset_mem = sdrt->tDS_min - (twait - thiz); + tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp); + if (tims->tset_mem == 0) + tims->tset_mem = 1; + else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK) + tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tHOLD_MEM > tCH + * tHOLD_MEM > tREH - tSETUP_MEM + * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT) + */ + thold_mem = hclkp; + if (thold_mem < sdrt->tCH_min) + thold_mem = sdrt->tCH_min; + if (sdrt->tREH_min > tset_mem && + (thold_mem < sdrt->tREH_min - tset_mem)) + thold_mem = sdrt->tREH_min - tset_mem; + if ((sdrt->tRC_min > tset_mem + twait) && + (thold_mem < sdrt->tRC_min - (tset_mem + twait))) + thold_mem = sdrt->tRC_min - (tset_mem + twait); + if ((sdrt->tWC_min > tset_mem + twait) && + (thold_mem < sdrt->tWC_min - (tset_mem + twait))) + thold_mem = sdrt->tWC_min - (tset_mem + 
twait); + tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp); + if (tims->thold_mem == 0) + tims->thold_mem = 1; + else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK) + tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tSETUP_ATT > tCS - tWAIT + * tSETUP_ATT > tCLS - tWAIT + * tSETUP_ATT > tALS - tWAIT + * tSETUP_ATT > tRHW - tHOLD_MEM + * tSETUP_ATT > tDS - (tWAIT - tHIZ) + */ + tset_att = hclkp; + if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait)) + tset_att = sdrt->tCS_min - twait; + if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait)) + tset_att = sdrt->tCLS_min - twait; + if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait)) + tset_att = sdrt->tALS_min - twait; + if (sdrt->tRHW_min > thold_mem && + (tset_att < sdrt->tRHW_min - thold_mem)) + tset_att = sdrt->tRHW_min - thold_mem; + if (twait > thiz && (sdrt->tDS_min > twait - thiz) && + (tset_att < sdrt->tDS_min - (twait - thiz))) + tset_att = sdrt->tDS_min - (twait - thiz); + tims->tset_att = DIV_ROUND_UP(tset_att, hclkp); + if (tims->tset_att == 0) + tims->tset_att = 1; + else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK) + tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tHOLD_ATT > tALH + * tHOLD_ATT > tCH + * tHOLD_ATT > tCLH + * tHOLD_ATT > tCOH + * tHOLD_ATT > tDH + * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM + * tHOLD_ATT > tADL - tSETUP_MEM + * tHOLD_ATT > tWH - tSETUP_MEM + * tHOLD_ATT > tWHR - tSETUP_MEM + * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT) + * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT) + */ + thold_att = hclkp; + if (thold_att < sdrt->tALH_min) + thold_att = sdrt->tALH_min; + if (thold_att < sdrt->tCH_min) + thold_att = sdrt->tCH_min; + if (thold_att < sdrt->tCLH_min) + thold_att = sdrt->tCLH_min; + if (thold_att < sdrt->tCOH_min) + thold_att = sdrt->tCOH_min; + if (thold_att < sdrt->tDH_min) + thold_att = sdrt->tDH_min; + if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) && + (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem)) + thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem; + if (sdrt->tADL_min > tset_mem && + (thold_att < sdrt->tADL_min - tset_mem)) + thold_att = sdrt->tADL_min - tset_mem; + if (sdrt->tWH_min > tset_mem && + (thold_att < sdrt->tWH_min - tset_mem)) + thold_att = sdrt->tWH_min - tset_mem; + if (sdrt->tWHR_min > tset_mem && + (thold_att < sdrt->tWHR_min - tset_mem)) + thold_att = sdrt->tWHR_min - tset_mem; + if ((sdrt->tRC_min > tset_att + twait) && + (thold_att < sdrt->tRC_min - (tset_att + twait))) + thold_att = sdrt->tRC_min - (tset_att + twait); + if ((sdrt->tWC_min > tset_att + twait) && + (thold_att < sdrt->tWC_min - (tset_att + twait))) + thold_att = sdrt->tWC_min - (tset_att + twait); + tims->thold_att = DIV_ROUND_UP(thold_att, hclkp); + if (tims->thold_att == 0) + tims->thold_att = 1; + else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK) + tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK; +} + +static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) +{ + const struct nand_sdr_timings *sdrt; + + sdrt = nand_get_sdr_timings(conf); + if (IS_ERR(sdrt)) + return PTR_ERR(sdrt); + + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + stm32_fmc2_calc_timings(chip, sdrt); + + /* Apply timings */ + stm32_fmc2_timings_init(chip); + + return 0; +} + +/* DMA configuration */ +static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2) +{ + int ret; + + fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx"); + fmc2->dma_rx_ch = 
dma_request_slave_channel(fmc2->dev, "rx"); + fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc"); + + if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) { + dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n"); + return 0; + } + + ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL); + if (ret) + return ret; + + /* Allocate a buffer to store ECC status registers */ + fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN, + GFP_KERNEL); + if (!fmc2->ecc_buf) + return -ENOMEM; + + ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL); + if (ret) + return ret; + + init_completion(&fmc2->dma_data_complete); + init_completion(&fmc2->dma_ecc_complete); + + return 0; +} + +/* NAND callbacks setup */ +static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + + /* + * Specific callbacks to read/write a page depending on + * the mode (polling/sequencer) and the algo used (Hamming, BCH). + */ + if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) { + /* DMA => use sequencer mode callbacks */ + chip->ecc.correct = stm32_fmc2_sequencer_correct; + chip->ecc.write_page = stm32_fmc2_sequencer_write_page; + chip->ecc.read_page = stm32_fmc2_sequencer_read_page; + chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw; + chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw; + } else { + /* No DMA => use polling mode callbacks */ + chip->ecc.hwctl = stm32_fmc2_hwctl; + if (chip->ecc.strength == FMC2_ECC_HAM) { + /* Hamming is used */ + chip->ecc.calculate = stm32_fmc2_ham_calculate; + chip->ecc.correct = stm32_fmc2_ham_correct; + chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK; + } else { + /* BCH is used */ + chip->ecc.calculate = stm32_fmc2_bch_calculate; + chip->ecc.correct = stm32_fmc2_bch_correct; + chip->ecc.read_page = stm32_fmc2_read_page; + } + } + + /* Specific configurations depending on the algo used */ + if (chip->ecc.strength == FMC2_ECC_HAM) + chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3; + else if (chip->ecc.strength == FMC2_ECC_BCH8) + chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13; + else + chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 
8 : 7; +} + +/* FMC2 layout */ +static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + oobregion->length = ecc->total; + oobregion->offset = FMC2_BBM_LEN; + + return 0; +} + +static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN; + oobregion->offset = ecc->total + FMC2_BBM_LEN; + + return 0; +} + +static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = { + .ecc = stm32_fmc2_nand_ooblayout_ecc, + .free = stm32_fmc2_nand_ooblayout_free, +}; + +/* FMC2 caps */ +static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) +{ + /* Hamming */ + if (strength == FMC2_ECC_HAM) + return 4; + + /* BCH8 */ + if (strength == FMC2_ECC_BCH8) + return 14; + + /* BCH4 */ + return 8; +} + +NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes, + FMC2_ECC_STEP_SIZE, + FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8); + +/* FMC2 controller ops */ +static int stm32_fmc2_attach_chip(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* + * Only NAND_ECC_HW mode is actually supported + * Hamming => ecc.strength = 1 + * BCH4 => ecc.strength = 4 + * BCH8 => ecc.strength = 8 + * ECC sector size = 512 + */ + if (chip->ecc.mode != NAND_ECC_HW) { + dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n"); + return -EINVAL; + } + + ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps, + mtd->oobsize - FMC2_BBM_LEN); + if (ret) { + dev_err(fmc2->dev, "no valid ECC settings set\n"); + return ret; + } + + if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) { + dev_err(fmc2->dev, "nand page size is not supported\n"); + return -EINVAL; + } + + if (chip->bbt_options & NAND_BBT_USE_FLASH) + chip->bbt_options |= NAND_BBT_NO_OOB; + + /* NAND callbacks setup */ + stm32_fmc2_nand_callbacks_setup(chip); + + /* Define ECC layout */ + mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops); + + /* Configure bus width to 16-bit */ + if (chip->options & NAND_BUSWIDTH_16) + stm32_fmc2_set_buswidth_16(fmc2, true); + + return 0; +} + +static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = { + .attach_chip = stm32_fmc2_attach_chip, + .exec_op = stm32_fmc2_exec_op, + .setup_data_interface = stm32_fmc2_setup_interface, +}; + +/* FMC2 probe */ +static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, + struct device_node *dn) +{ + struct stm32_fmc2_nand *nand = &fmc2->nand; + u32 cs; + int ret, i; + + if (!of_get_property(dn, "reg", &nand->ncs)) + return -EINVAL; + + nand->ncs /= sizeof(u32); + if (!nand->ncs) { + dev_err(fmc2->dev, "invalid reg property size\n"); + return -EINVAL; + } + + for (i = 0; i < nand->ncs; i++) { + ret = of_property_read_u32_index(dn, "reg", i, &cs); + if (ret) { + dev_err(fmc2->dev, "could not retrieve reg property: %d\n", + ret); + return ret; + } + + if (cs > FMC2_MAX_CE) { + dev_err(fmc2->dev, "invalid reg value: %d\n", cs); + return -EINVAL; + } + + if (fmc2->cs_assigned & BIT(cs)) { + dev_err(fmc2->dev, "cs already assigned: %d\n", cs); + return -EINVAL; + } + + fmc2->cs_assigned |= BIT(cs); + nand->cs_used[i] = cs; + } + + 
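/*
 * Descriptive note (editorial): every cell of the child node's "reg" property
 * has now been turned into a chip-select id. For example, a hypothetical node
 * with reg = <0 1> would describe one NAND chip wired to both CE lines, giving
 * ncs = 2 and cs_used[] = {0, 1}; the matching bits in cs_assigned let probe()
 * map only the data/cmd/addr regions that are actually used.
 */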
nand_set_flash_node(&nand->chip, dn); + + return 0; +} + +static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2) +{ + struct device_node *dn = fmc2->dev->of_node; + struct device_node *child; + int nchips = of_get_child_count(dn); + int ret = 0; + + if (!nchips) { + dev_err(fmc2->dev, "NAND chip not defined\n"); + return -EINVAL; + } + + if (nchips > 1) { + dev_err(fmc2->dev, "too many NAND chips defined\n"); + return -EINVAL; + } + + for_each_child_of_node(dn, child) { + ret = stm32_fmc2_parse_child(fmc2, child); + if (ret < 0) { + of_node_put(child); + return ret; + } + } + + return ret; +} + +static int stm32_fmc2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reset_control *rstc; + struct stm32_fmc2_nfc *fmc2; + struct stm32_fmc2_nand *nand; + struct resource *res; + struct mtd_info *mtd; + struct nand_chip *chip; + int chip_cs, mem_region, ret, irq; + + fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL); + if (!fmc2) + return -ENOMEM; + + fmc2->dev = dev; + nand_controller_init(&fmc2->base); + fmc2->base.ops = &stm32_fmc2_nand_controller_ops; + + ret = stm32_fmc2_parse_dt(fmc2); + if (ret) + return ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fmc2->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->io_base)) + return PTR_ERR(fmc2->io_base); + + fmc2->io_phys_addr = res->start; + + for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE; + chip_cs++, mem_region += 3) { + if (!(fmc2->cs_assigned & BIT(chip_cs))) + continue; + + res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region); + fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->data_base[chip_cs])) + return PTR_ERR(fmc2->data_base[chip_cs]); + + fmc2->data_phys_addr[chip_cs] = res->start; + + res = platform_get_resource(pdev, IORESOURCE_MEM, + mem_region + 1); + fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->cmd_base[chip_cs])) + return PTR_ERR(fmc2->cmd_base[chip_cs]); + + res = platform_get_resource(pdev, IORESOURCE_MEM, + mem_region + 2); + fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->addr_base[chip_cs])) + return PTR_ERR(fmc2->addr_base[chip_cs]); + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, + dev_name(dev), fmc2); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + init_completion(&fmc2->complete); + + fmc2->clk = devm_clk_get(dev, NULL); + if (IS_ERR(fmc2->clk)) + return PTR_ERR(fmc2->clk); + + ret = clk_prepare_enable(fmc2->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + rstc = devm_reset_control_get(dev, NULL); + if (!IS_ERR(rstc)) { + reset_control_assert(rstc); + reset_control_deassert(rstc); + } + + /* DMA setup */ + ret = stm32_fmc2_dma_setup(fmc2); + if (ret) + return ret; + + /* FMC2 init routine */ + stm32_fmc2_init(fmc2); + + nand = &fmc2->nand; + chip = &nand->chip; + mtd = nand_to_mtd(chip); + mtd->dev.parent = dev; + + chip->controller = &fmc2->base; + chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | + NAND_USE_BOUNCE_BUFFER; + + /* Default ECC settings */ + chip->ecc.mode = NAND_ECC_HW; + chip->ecc.size = FMC2_ECC_STEP_SIZE; + chip->ecc.strength = FMC2_ECC_BCH8; + + /* Scan to find existence of the device */ + ret = nand_scan(chip, nand->ncs); + if (ret) + goto err_scan; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + goto err_device_register; + + platform_set_drvdata(pdev, fmc2); + + return 0; + 
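/*
 * Descriptive note (editorial): the error unwinding below mirrors the setup
 * order above - undo the NAND scan first, then release whichever DMA channels
 * were actually requested, free both scatterlist tables and finally gate the
 * clock. stm32_fmc2_remove() repeats the same teardown sequence.
 */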
+err_device_register: + nand_cleanup(chip); + +err_scan: + if (fmc2->dma_ecc_ch) + dma_release_channel(fmc2->dma_ecc_ch); + if (fmc2->dma_tx_ch) + dma_release_channel(fmc2->dma_tx_ch); + if (fmc2->dma_rx_ch) + dma_release_channel(fmc2->dma_rx_ch); + + sg_free_table(&fmc2->dma_data_sg); + sg_free_table(&fmc2->dma_ecc_sg); + + clk_disable_unprepare(fmc2->clk); + + return ret; +} + +static int stm32_fmc2_remove(struct platform_device *pdev) +{ + struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev); + struct stm32_fmc2_nand *nand = &fmc2->nand; + + nand_release(&nand->chip); + + if (fmc2->dma_ecc_ch) + dma_release_channel(fmc2->dma_ecc_ch); + if (fmc2->dma_tx_ch) + dma_release_channel(fmc2->dma_tx_ch); + if (fmc2->dma_rx_ch) + dma_release_channel(fmc2->dma_rx_ch); + + sg_free_table(&fmc2->dma_data_sg); + sg_free_table(&fmc2->dma_ecc_sg); + + clk_disable_unprepare(fmc2->clk); + + return 0; +} + +static int __maybe_unused stm32_fmc2_suspend(struct device *dev) +{ + struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); + + clk_disable_unprepare(fmc2->clk); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int __maybe_unused stm32_fmc2_resume(struct device *dev) +{ + struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); + struct stm32_fmc2_nand *nand = &fmc2->nand; + int chip_cs, ret; + + pinctrl_pm_select_default_state(dev); + + ret = clk_prepare_enable(fmc2->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + stm32_fmc2_init(fmc2); + + for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) { + if (!(fmc2->cs_assigned & BIT(chip_cs))) + continue; + + nand_reset(&nand->chip, chip_cs); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend, + stm32_fmc2_resume); + +static const struct of_device_id stm32_fmc2_match[] = { + {.compatible = "st,stm32mp15-fmc2"}, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_fmc2_match); + +static struct platform_driver stm32_fmc2_driver = { + .probe = stm32_fmc2_probe, + .remove = stm32_fmc2_remove, + .driver = { + .name = "stm32_fmc2_nand", + .of_match_table = stm32_fmc2_match, + .pm = &stm32_fmc2_pm_ops, + }, +}; +module_platform_driver(stm32_fmc2_driver); + +MODULE_ALIAS("platform:stm32_fmc2_nand"); +MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index e828ee50a201..4282bc477761 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com> * @@ -10,16 +11,6 @@ * * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com> * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/dma-mapping.h> @@ -163,38 +154,36 @@ #define NFC_MAX_CS 7 -/* - * Chip Select structure: stores information related to NAND Chip Select +/** + * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select * - * @cs: the NAND CS id used to communicate with a NAND Chip - * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the - * NFC + * @cs: the NAND CS id used to communicate with a NAND Chip + * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC */ struct sunxi_nand_chip_sel { u8 cs; s8 rb; }; -/* - * sunxi HW ECC infos: stores information related to HW ECC support +/** + * struct sunxi_nand_hw_ecc - stores information related to HW ECC support * - * @mode: the sunxi ECC mode field deduced from ECC requirements + * @mode: the sunxi ECC mode field deduced from ECC requirements */ struct sunxi_nand_hw_ecc { int mode; }; -/* - * NAND chip structure: stores NAND chip device related information +/** + * struct sunxi_nand_chip - stores NAND chip device related information * - * @node: used to store NAND chips into a list - * @nand: base NAND chip structure - * @mtd: base MTD structure - * @clk_rate: clk_rate required for this NAND chip - * @timing_cfg TIMING_CFG register value for this NAND chip - * @selected: current active CS - * @nsels: number of CS lines required by the NAND chip - * @sels: array of CS lines descriptions + * @node: used to store NAND chips into a list + * @nand: base NAND chip structure + * @clk_rate: clk_rate required for this NAND chip + * @timing_cfg: TIMING_CFG register value for this NAND chip + * @timing_ctl: TIMING_CTL register value for this NAND chip + * @nsels: number of CS lines required by the NAND chip + * @sels: array of CS lines descriptions */ struct sunxi_nand_chip { struct list_head node; @@ -202,11 +191,6 @@ struct sunxi_nand_chip { unsigned long clk_rate; u32 timing_cfg; u32 timing_ctl; - int selected; - int addr_cycles; - u32 addr[2]; - int cmd_cycles; - u8 cmd[2]; int nsels; struct sunxi_nand_chip_sel sels[0]; }; @@ -216,20 +200,21 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand) return container_of(nand, struct sunxi_nand_chip, nand); } -/* - * NAND Controller structure: stores sunxi NAND controller information +/** + * struct sunxi_nfc - stores sunxi NAND controller information * - * @controller: base controller structure - * @dev: parent device (used to print error messages) - * @regs: NAND controller registers - * @ahb_clk: NAND Controller AHB clock - * @mod_clk: NAND Controller mod clock - * @assigned_cs: bitmask describing already assigned CS lines - * @clk_rate: NAND controller current clock rate - * @chips: a list containing all the NAND chips attached to - * this NAND controller - * @complete: a completion object used to wait for NAND - * controller events + * @controller: base controller structure + * @dev: parent device (used to print error messages) + * @regs: NAND controller registers + * @ahb_clk: NAND controller AHB clock + * @mod_clk: NAND controller mod clock + * @reset: NAND controller reset line + * @assigned_cs: bitmask describing already assigned CS lines + * @clk_rate: NAND controller current clock rate + * @chips: a list containing all the NAND chips attached to this NAND + * controller + * @complete: a completion object used to wait for NAND controller events + * @dmac: the DMA channel attached to the NAND controller */ struct sunxi_nfc { struct nand_controller controller; @@ -339,13 +324,11 @@ static int sunxi_nfc_rst(struct sunxi_nfc 
*nfc) return ret; } -static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf, +static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf, int chunksize, int nchunks, enum dma_data_direction ddir, struct scatterlist *sg) { - struct nand_chip *nand = mtd_to_nand(mtd); - struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct dma_async_tx_descriptor *dmad; enum dma_transfer_direction tdir; dma_cookie_t dmat; @@ -388,38 +371,16 @@ err_unmap_buf: return ret; } -static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd, +static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc, enum dma_data_direction ddir, struct scatterlist *sg) { - struct nand_chip *nand = mtd_to_nand(mtd); - struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); - dma_unmap_sg(nfc->dev, sg, 1, ddir); writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, nfc->regs + NFC_REG_CTL); } -static int sunxi_nfc_dev_ready(struct nand_chip *nand) -{ - struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); - u32 mask; - - if (sunxi_nand->selected < 0) - return 0; - - if (sunxi_nand->sels[sunxi_nand->selected].rb < 0) { - dev_err(nfc->dev, "cannot check R/B NAND status!\n"); - return 0; - } - - mask = NFC_RB_STATE(sunxi_nand->sels[sunxi_nand->selected].rb); - - return !!(readl(nfc->regs + NFC_REG_ST) & mask); -} - -static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip) +static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs) { struct mtd_info *mtd = nand_to_mtd(nand); struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); @@ -427,40 +388,27 @@ static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip) struct sunxi_nand_chip_sel *sel; u32 ctl; - if (chip > 0 && chip >= sunxi_nand->nsels) - return; - - if (chip == sunxi_nand->selected) + if (cs > 0 && cs >= sunxi_nand->nsels) return; ctl = readl(nfc->regs + NFC_REG_CTL) & ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN); - if (chip >= 0) { - sel = &sunxi_nand->sels[chip]; + sel = &sunxi_nand->sels[cs]; + ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift); + if (sel->rb >= 0) + ctl |= NFC_RB_SEL(sel->rb); - ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | - NFC_PAGE_SHIFT(nand->page_shift); - if (sel->rb < 0) { - nand->legacy.dev_ready = NULL; - } else { - nand->legacy.dev_ready = sunxi_nfc_dev_ready; - ctl |= NFC_RB_SEL(sel->rb); - } - - writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA); + writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA); - if (nfc->clk_rate != sunxi_nand->clk_rate) { - clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate); - nfc->clk_rate = sunxi_nand->clk_rate; - } + if (nfc->clk_rate != sunxi_nand->clk_rate) { + clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate); + nfc->clk_rate = sunxi_nand->clk_rate; } writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL); writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG); writel(ctl, nfc->regs + NFC_REG_CTL); - - sunxi_nand->selected = chip; } static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len) @@ -537,71 +485,6 @@ static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf, } } -static uint8_t sunxi_nfc_read_byte(struct nand_chip *nand) -{ - uint8_t ret = 0; - - sunxi_nfc_read_buf(nand, &ret, 1); - - return ret; -} - -static void sunxi_nfc_cmd_ctrl(struct nand_chip *nand, int dat, - unsigned int ctrl) -{ - struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); - struct sunxi_nfc 
*nfc = to_sunxi_nfc(sunxi_nand->nand.controller); - int ret; - - if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) && - !(ctrl & (NAND_CLE | NAND_ALE))) { - u32 cmd = 0; - - if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles) - return; - - if (sunxi_nand->cmd_cycles--) - cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0]; - - if (sunxi_nand->cmd_cycles--) { - cmd |= NFC_SEND_CMD2; - writel(sunxi_nand->cmd[1], - nfc->regs + NFC_REG_RCMD_SET); - } - - sunxi_nand->cmd_cycles = 0; - - if (sunxi_nand->addr_cycles) { - cmd |= NFC_SEND_ADR | - NFC_ADR_NUM(sunxi_nand->addr_cycles); - writel(sunxi_nand->addr[0], - nfc->regs + NFC_REG_ADDR_LOW); - } - - if (sunxi_nand->addr_cycles > 4) - writel(sunxi_nand->addr[1], - nfc->regs + NFC_REG_ADDR_HIGH); - - ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); - if (ret) - return; - - writel(cmd, nfc->regs + NFC_REG_CMD); - sunxi_nand->addr[0] = 0; - sunxi_nand->addr[1] = 0; - sunxi_nand->addr_cycles = 0; - sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); - } - - if (ctrl & NAND_CLE) { - sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat; - } else if (ctrl & NAND_ALE) { - sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |= - dat << ((sunxi_nand->addr_cycles % 4) * 8); - sunxi_nand->addr_cycles++; - } -} - /* These seed values have been extracted from Allwinner's BSP */ static const u16 sunxi_nfc_randomizer_page_seeds[] = { 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72, @@ -684,8 +567,10 @@ static u16 sunxi_nfc_randomizer_step(u16 state, int count) return state; } -static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc) +static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page, + bool ecc) { + struct mtd_info *mtd = nand_to_mtd(nand); const u16 *seeds = sunxi_nfc_randomizer_page_seeds; int mod = mtd_div_by_ws(mtd->erasesize, mtd); @@ -702,10 +587,9 @@ static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc) return seeds[page % mod]; } -static void sunxi_nfc_randomizer_config(struct mtd_info *mtd, - int page, bool ecc) +static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page, + bool ecc) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); u16 state; @@ -714,14 +598,13 @@ static void sunxi_nfc_randomizer_config(struct mtd_info *mtd, return; ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); - state = sunxi_nfc_randomizer_state(mtd, page, ecc); + state = sunxi_nfc_randomizer_state(nand, page, ecc); ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK; writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd) +static void sunxi_nfc_randomizer_enable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); if (!(nand->options & NAND_NEED_SCRAMBLING)) @@ -731,9 +614,8 @@ static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd) nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd) +static void sunxi_nfc_randomizer_disable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); if (!(nand->options & NAND_NEED_SCRAMBLING)) @@ -743,36 +625,35 @@ static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd) nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm) 
+static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm) { - u16 state = sunxi_nfc_randomizer_state(mtd, page, true); + u16 state = sunxi_nfc_randomizer_state(nand, page, true); bbm[0] ^= state; bbm[1] ^= sunxi_nfc_randomizer_step(state, 8); } -static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd, +static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand, const uint8_t *buf, int len, bool ecc, int page) { - sunxi_nfc_randomizer_config(mtd, page, ecc); - sunxi_nfc_randomizer_enable(mtd); - sunxi_nfc_write_buf(mtd_to_nand(mtd), buf, len); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_config(nand, page, ecc); + sunxi_nfc_randomizer_enable(nand); + sunxi_nfc_write_buf(nand, buf, len); + sunxi_nfc_randomizer_disable(nand); } -static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf, +static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf, int len, bool ecc, int page) { - sunxi_nfc_randomizer_config(mtd, page, ecc); - sunxi_nfc_randomizer_enable(mtd); - sunxi_nfc_read_buf(mtd_to_nand(mtd), buf, len); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_config(nand, page, ecc); + sunxi_nfc_randomizer_enable(nand); + sunxi_nfc_read_buf(nand, buf, len); + sunxi_nfc_randomizer_disable(nand); } -static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) +static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct sunxi_nand_hw_ecc *data = nand->ecc.priv; u32 ecc_ctl; @@ -789,9 +670,8 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd) +static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN, @@ -811,10 +691,9 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); } -static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob, +static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob, int step, bool bbm, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)), @@ -822,21 +701,20 @@ static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob, /* De-randomize the Bad Block Marker. */ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) - sunxi_nfc_randomize_bbm(mtd, page, oob); + sunxi_nfc_randomize_bbm(nand, page, oob); } -static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand, const u8 *oob, int step, bool bbm, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); u8 user_data[4]; /* Randomize the Bad Block Marker. 
*/ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) { memcpy(user_data, oob, sizeof(user_data)); - sunxi_nfc_randomize_bbm(mtd, page, user_data); + sunxi_nfc_randomize_bbm(nand, page, user_data); oob = user_data; } @@ -844,9 +722,11 @@ static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd, nfc->regs + NFC_REG_USER_DATA(step)); } -static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand, unsigned int *max_bitflips, int ret) { + struct mtd_info *mtd = nand_to_mtd(nand); + if (ret < 0) { mtd->ecc_stats.failed++; } else { @@ -855,10 +735,9 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, } } -static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, +static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob, int step, u32 status, bool *erased) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; u32 tmp; @@ -892,14 +771,13 @@ static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, return NFC_ECC_ERR_CNT(step, tmp); } -static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, +static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand, u8 *data, int data_off, u8 *oob, int oob_off, int *cur_off, unsigned int *max_bitflips, bool bbm, bool oob_required, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; int raw_mode = 0; @@ -909,7 +787,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (*cur_off != data_off) nand_change_read_column_op(nand, data_off, NULL, 0, false); - sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page); + sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page); if (data_off + ecc->size != oob_off) nand_change_read_column_op(nand, oob_off, NULL, 0, false); @@ -918,18 +796,18 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (ret) return ret; - sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_randomizer_enable(nand); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_disable(nand); if (ret) return ret; *cur_off = oob_off + ecc->bytes + 4; - ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, + ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? 
oob : NULL, 0, readl(nfc->regs + NFC_REG_ECC_ST), &erased); if (erased) @@ -961,24 +839,24 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (oob_required) { nand_change_read_column_op(nand, oob_off, NULL, 0, false); - sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, + sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4, true, page); - sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0, + sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0, bbm, page); } } - sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret); + sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret); return raw_mode; } -static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand, u8 *oob, int *cur_off, bool randomize, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); + struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; int offset = ((ecc->bytes + 4) * ecc->steps); int len = mtd->oobsize - offset; @@ -993,20 +871,20 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, if (!randomize) sunxi_nfc_read_buf(nand, oob + offset, len); else - sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len, + sunxi_nfc_randomizer_read_buf(nand, oob + offset, len, false, page); if (cur_off) *cur_off = mtd->oobsize + mtd->writesize; } -static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, +static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf, int oob_required, int page, int nchunks) { - struct nand_chip *nand = mtd_to_nand(mtd); bool randomized = nand->options & NAND_NEED_SCRAMBLING; struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; unsigned int max_bitflips = 0; int ret, i, raw_mode = 0; @@ -1017,14 +895,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, if (ret) return ret; - ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks, + ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks, DMA_FROM_DEVICE, &sg); if (ret) return ret; - sunxi_nfc_hw_ecc_enable(mtd); - sunxi_nfc_randomizer_config(mtd, page, false); - sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); + sunxi_nfc_randomizer_config(nand, page, false); + sunxi_nfc_randomizer_enable(nand); writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); @@ -1038,10 +916,10 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, if (ret) dmaengine_terminate_all(nfc->dmac); - sunxi_nfc_randomizer_disable(mtd); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_randomizer_disable(nand); + sunxi_nfc_hw_ecc_disable(nand); - sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg); + sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg); if (ret) return ret; @@ -1055,7 +933,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, u8 *oob = nand->oob_poi + oob_off; bool erased; - ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL, + ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL, oob_required ? 
oob : NULL, i, status, &erased); @@ -1069,14 +947,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, mtd->writesize + oob_off, oob, ecc->bytes + 4, false); - sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, + sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i, !i, page); } if (erased) raw_mode = 1; - sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); + sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret); } if (status & NFC_ECC_ERR_MSK) { @@ -1111,25 +989,24 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, if (ret >= 0) raw_mode = 1; - sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); + sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret); } } if (oob_required) - sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi, + sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, NULL, !raw_mode, page); return max_bitflips; } -static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, +static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand, const u8 *data, int data_off, const u8 *oob, int oob_off, int *cur_off, bool bbm, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; int ret; @@ -1137,7 +1014,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, if (data_off != *cur_off) nand_change_write_column_op(nand, data_off, NULL, 0, false); - sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); + sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page); if (data_off + ecc->size != oob_off) nand_change_write_column_op(nand, oob_off, NULL, 0, false); @@ -1146,15 +1023,15 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, if (ret) return ret; - sunxi_nfc_randomizer_enable(mtd); - sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page); + sunxi_nfc_randomizer_enable(nand); + sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_disable(nand); if (ret) return ret; @@ -1163,11 +1040,11 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, return 0; } -static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand, u8 *oob, int *cur_off, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); + struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; int offset = ((ecc->bytes + 4) * ecc->steps); int len = mtd->oobsize - offset; @@ -1179,32 +1056,34 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, nand_change_write_column_op(nand, offset + mtd->writesize, NULL, 0, false); - sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); + sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page); if (cur_off) *cur_off = mtd->oobsize + mtd->writesize; } -static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf, +static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; unsigned int max_bitflips = 0; int ret, i, cur_off = 0; bool raw_mode = false; - 
nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); for (i = 0; i < ecc->steps; i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); u8 *data = buf + data_off; - u8 *oob = chip->oob_poi + oob_off; + u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, + ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, &max_bitflips, !i, oob_required, page); @@ -1215,52 +1094,55 @@ static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf, } if (oob_required) - sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, + sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off, !raw_mode, page); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); return max_bitflips; } -static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *chip, u8 *buf, +static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); int ret; - nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, - chip->ecc.steps); + ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page, + nand->ecc.steps); if (ret >= 0) return ret; /* Fallback to PIO mode */ - return sunxi_nfc_hw_ecc_read_page(chip, buf, oob_required, page); + return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page); } -static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand, u32 data_offs, u32 readlen, u8 *bufpoi, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; int ret, i, cur_off = 0; unsigned int max_bitflips = 0; - nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); for (i = data_offs / ecc->size; i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); u8 *data = bufpoi + data_off; - u8 *oob = chip->oob_poi + oob_off; + u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, + ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, &max_bitflips, !i, @@ -1269,113 +1151,118 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip, return ret; } - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); return max_bitflips; } -static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand, u32 data_offs, u32 readlen, u8 *buf, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); + int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size); int ret; - nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks); + ret = 
sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks); if (ret >= 0) return ret; /* Fallback to PIO mode */ - return sunxi_nfc_hw_ecc_read_subpage(chip, data_offs, readlen, + return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen, buf, page); } -static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand, const uint8_t *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; int ret, i, cur_off = 0; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_prog_page_begin_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); for (i = 0; i < ecc->steps; i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); const u8 *data = buf + data_off; - const u8 *oob = chip->oob_poi + oob_off; + const u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, !i, page); if (ret) return ret; } - if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) - sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + if (oob_required || (nand->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi, &cur_off, page); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); } -static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand, u32 data_offs, u32 data_len, const u8 *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; int ret, i, cur_off = 0; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); - sunxi_nfc_hw_ecc_enable(mtd); + nand_prog_page_begin_op(nand, page, 0, NULL, 0); + + sunxi_nfc_hw_ecc_enable(nand); for (i = data_offs / ecc->size; i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); const u8 *data = buf + data_off; - const u8 *oob = chip->oob_poi + oob_off; + const u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, !i, page); if (ret) return ret; } - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); } -static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand, const u8 *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; struct scatterlist sg; int ret, i; + sunxi_nfc_select_chip(nand, nand->cur_cs); + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); if (ret) return ret; - ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps, + ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps, 
DMA_TO_DEVICE, &sg); if (ret) goto pio_fallback; @@ -1383,14 +1270,14 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip, for (i = 0; i < ecc->steps; i++) { const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4)); - sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); + sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page); } - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + nand_prog_page_begin_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); - sunxi_nfc_randomizer_config(mtd, page, false); - sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); + sunxi_nfc_randomizer_config(nand, page, false); + sunxi_nfc_randomizer_enable(nand); writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, nfc->regs + NFC_REG_WCMD_SET); @@ -1405,46 +1292,46 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip, if (ret) dmaengine_terminate_all(nfc->dmac); - sunxi_nfc_randomizer_disable(mtd); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_randomizer_disable(nand); + sunxi_nfc_hw_ecc_disable(nand); - sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg); + sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg); if (ret) return ret; - if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + if (oob_required || (nand->options & NAND_NEED_SCRAMBLING)) /* TODO: use DMA to transfer extra OOB bytes ? */ - sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi, NULL, page); - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); pio_fallback: - return sunxi_nfc_hw_ecc_write_page(chip, buf, oob_required, page); + return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page); } -static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *chip, int page) +static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page) { - chip->pagebuf = -1; + nand->pagebuf = -1; - return chip->ecc.read_page(chip, chip->data_buf, 1, page); + return nand->ecc.read_page(nand, nand->data_buf, 1, page); } -static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *chip, int page) +static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); + struct mtd_info *mtd = nand_to_mtd(nand); int ret; - chip->pagebuf = -1; + nand->pagebuf = -1; - memset(chip->data_buf, 0xff, mtd->writesize); - ret = chip->ecc.write_page(chip, chip->data_buf, 1, page); + memset(nand->data_buf, 0xff, mtd->writesize); + ret = nand->ecc.write_page(nand, nand->data_buf, 1, page); if (ret) return ret; /* Send command to program the OOB data */ - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); } static const s32 tWB_lut[] = {6, 12, 16, 20}; @@ -1471,8 +1358,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, const struct nand_data_interface *conf) { - struct sunxi_nand_chip *chip = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); const struct nand_sdr_timings *timings; u32 min_clk_period = 0; s32 tWB, tADL, tWHR, tRHW, tCAD; @@ -1555,6 +1442,20 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, if (timings->tRHW_min > (min_clk_period * 20)) min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20); + /* + * In non-EDO, tREA should be less than tRP to guarantee 
that the + * controller does not sample the IO lines too early. Unfortunately, + * the sunxi NAND controller does not allow us to have different + * values for tRP and tREH (tRP = tREH = tRW / 2). + * + * We have 2 options to overcome this limitation: + * + * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint + * 2/ Use EDO mode (only works if timings->tRLOH > 0) + */ + if (timings->tREA_max > min_clk_period && !timings->tRLOH_min) + min_clk_period = timings->tREA_max; + tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max, min_clk_period); if (tWB < 0) { @@ -1591,7 +1492,7 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, tCAD = 0x7; /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */ - chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); + sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); /* Convert min_clk_period from picoseconds to nanoseconds */ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000); @@ -1602,21 +1503,24 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, * This new formula was verified with a scope and validated by * Allwinner engineers. */ - chip->clk_rate = NSEC_PER_SEC / min_clk_period; - real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); + sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period; + real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate); if (real_clk_rate <= 0) { - dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate); + dev_err(nfc->dev, "Unable to round clk %lu\n", + sunxi_nand->clk_rate); return -EINVAL; } + sunxi_nand->timing_ctl = 0; + /* * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data * output cycle timings shall be used if the host drives tRC less than - * 30 ns. + * 30 ns. We should also use EDO mode if tREA is bigger than tRP. */ min_clk_period = NSEC_PER_SEC / real_clk_rate; - chip->timing_ctl = ((min_clk_period * 2) < 30) ? 
- NFC_TIMING_CTL_EDO : 0; + if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max) + sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO; return 0; } @@ -1677,14 +1581,13 @@ static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc) kfree(ecc->priv); } -static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, +static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, struct nand_ecc_ctrl *ecc, struct device_node *np) { static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 }; - struct nand_chip *nand = mtd_to_nand(mtd); - struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct mtd_info *mtd = nand_to_mtd(nand); struct sunxi_nand_hw_ecc *data; int nsectors; int ret; @@ -1808,7 +1711,6 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc) static int sunxi_nand_attach_chip(struct nand_chip *nand) { - struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; struct device_node *np = nand_get_flash_node(nand); int ret; @@ -1831,7 +1733,7 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand) switch (ecc->mode) { case NAND_ECC_HW: - ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); + ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np); if (ret) return ret; break; @@ -1845,15 +1747,165 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand) return 0; } +static int sunxi_nfc_exec_subop(struct nand_chip *nand, + const struct nand_subop *subop) +{ + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { }; + unsigned int i, j, remaining, start; + void *inbuf = NULL; + int ret; + + for (i = 0; i < subop->ninstrs; i++) { + const struct nand_op_instr *instr = &subop->instrs[i]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (cmd & NFC_SEND_CMD1) { + if (WARN_ON(cmd & NFC_SEND_CMD2)) + return -EINVAL; + + cmd |= NFC_SEND_CMD2; + extcmd |= instr->ctx.cmd.opcode; + } else { + cmd |= NFC_SEND_CMD1 | + NFC_CMD(instr->ctx.cmd.opcode); + } + break; + + case NAND_OP_ADDR_INSTR: + remaining = nand_subop_get_num_addr_cyc(subop, i); + start = nand_subop_get_addr_start_off(subop, i); + for (j = 0; j < 8 && j + start < remaining; j++) { + u32 addr = instr->ctx.addr.addrs[j + start]; + + addrs[j / 4] |= addr << (j % 4) * 8; + } + + if (j) + cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j); + + break; + + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + start = nand_subop_get_data_start_off(subop, i); + remaining = nand_subop_get_data_len(subop, i); + cnt = min_t(u32, remaining, NFC_SRAM_SIZE); + cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD; + + if (instr->type == NAND_OP_DATA_OUT_INSTR) { + cmd |= NFC_ACCESS_DIR; + memcpy_toio(nfc->regs + NFC_RAM0_BASE, + instr->ctx.data.buf.out + start, + cnt); + } else { + inbuf = instr->ctx.data.buf.in + start; + } + + break; + + case NAND_OP_WAITRDY_INSTR: + cmd |= NFC_WAIT_FLAG; + break; + } + } + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return ret; + + if (cmd & NFC_SEND_ADR) { + writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW); + writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH); + } + + if (cmd & NFC_SEND_CMD2) + writel(extcmd, + nfc->regs + + (cmd & NFC_ACCESS_DIR ? 
+ NFC_REG_WCMD_SET : NFC_REG_RCMD_SET)); + + if (cmd & NFC_DATA_TRANS) + writel(cnt, nfc->regs + NFC_REG_CNT); + + writel(cmd, nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, + !(cmd & NFC_WAIT_FLAG) && cnt < 64, + 0); + if (ret) + return ret; + + if (inbuf) + memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt); + + return 0; +} + +static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand, + const struct nand_subop *subop) +{ + return nand_soft_waitrdy(nand, + subop->instrs[0].ctx.waitrdy.timeout_ms); +} + +static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)), + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), +); + +static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)), + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024), + NAND_OP_PARSER_PAT_CMD_ELEM(true)), + NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy, + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), +); + +static int sunxi_nfc_exec_op(struct nand_chip *nand, + const struct nand_operation *op, bool check_only) +{ + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + const struct nand_op_parser *parser; + + sunxi_nfc_select_chip(nand, op->cs); + + if (sunxi_nand->sels[op->cs].rb >= 0) + parser = &sunxi_nfc_op_parser; + else + parser = &sunxi_nfc_norb_op_parser; + + return nand_op_parser_exec_op(nand, parser, op, check_only); +} + static const struct nand_controller_ops sunxi_nand_controller_ops = { .attach_chip = sunxi_nand_attach_chip, .setup_data_interface = sunxi_nfc_setup_data_interface, + .exec_op = sunxi_nfc_exec_op, }; static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, struct device_node *np) { - struct sunxi_nand_chip *chip; + struct sunxi_nand_chip *sunxi_nand; struct mtd_info *mtd; struct nand_chip *nand; int nsels; @@ -1870,17 +1922,14 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, return -EINVAL; } - chip = devm_kzalloc(dev, - sizeof(*chip) + - (nsels * sizeof(struct sunxi_nand_chip_sel)), - GFP_KERNEL); - if (!chip) { + sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels), + GFP_KERNEL); + if (!sunxi_nand) { dev_err(dev, "could not allocate chip\n"); return -ENOMEM; } - chip->nsels = nsels; - chip->selected = -1; + sunxi_nand->nsels = nsels; for (i = 0; i < nsels; i++) { ret = of_property_read_u32_index(np, "reg", i, &tmp); @@ -1902,18 +1951,17 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, return -EINVAL; } - chip->sels[i].cs = tmp; + sunxi_nand->sels[i].cs = tmp; if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) && tmp < 2) - chip->sels[i].rb = tmp; + sunxi_nand->sels[i].rb = tmp; else - chip->sels[i].rb = -1; + sunxi_nand->sels[i].rb = -1; } - nand = &chip->nand; + 
nand = &sunxi_nand->nand; /* Default tR value specified in the ONFI spec (chapter 4.15.1) */ - nand->legacy.chip_delay = 200; nand->controller = &nfc->controller; nand->controller->ops = &sunxi_nand_controller_ops; @@ -1923,11 +1971,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, */ nand->ecc.mode = NAND_ECC_HW; nand_set_flash_node(nand, np); - nand->legacy.select_chip = sunxi_nfc_select_chip; - nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl; - nand->legacy.read_buf = sunxi_nfc_read_buf; - nand->legacy.write_buf = sunxi_nfc_write_buf; - nand->legacy.read_byte = sunxi_nfc_read_byte; mtd = nand_to_mtd(nand); mtd->dev.parent = dev; @@ -1943,7 +1986,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, return ret; } - list_add_tail(&chip->node, &nfc->chips); + list_add_tail(&sunxi_nand->node, &nfc->chips); return 0; } @@ -1973,14 +2016,15 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc) static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) { - struct sunxi_nand_chip *chip; + struct sunxi_nand_chip *sunxi_nand; while (!list_empty(&nfc->chips)) { - chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip, - node); - nand_release(&chip->nand); - sunxi_nand_ecc_cleanup(&chip->nand.ecc); - list_del(&chip->node); + sunxi_nand = list_first_entry(&nfc->chips, + struct sunxi_nand_chip, + node); + nand_release(&sunxi_nand->nand); + sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc); + list_del(&sunxi_nand->node); } } @@ -2124,7 +2168,7 @@ static struct platform_driver sunxi_nfc_driver = { }; module_platform_driver(sunxi_nfc_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); MODULE_AUTHOR("Boris BREZILLON"); MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver"); MODULE_ALIAS("platform:sunxi_nand"); diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index f3b59e649b7d..db030f1701ee 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -104,6 +104,7 @@ struct tmio_nand { struct nand_chip chip; + struct completion comp; struct platform_device *dev; @@ -168,15 +169,11 @@ static int tmio_nand_dev_ready(struct nand_chip *chip) static irqreturn_t tmio_irq(int irq, void *__tmio) { struct tmio_nand *tmio = __tmio; - struct nand_chip *nand_chip = &tmio->chip; /* disable RDYREQ interrupt */ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); + complete(&tmio->comp); - if (unlikely(!waitqueue_active(&nand_chip->controller->wq))) - dev_warn(&tmio->dev->dev, "spurious interrupt\n"); - - wake_up(&nand_chip->controller->wq); return IRQ_HANDLED; } @@ -193,18 +190,18 @@ static int tmio_nand_wait(struct nand_chip *nand_chip) u8 status; /* enable RDYREQ interrupt */ + tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR); + reinit_completion(&tmio->comp); tmio_iowrite8(0x81, tmio->fcr + FCR_IMR); - timeout = wait_event_timeout(nand_chip->controller->wq, - tmio_nand_dev_ready(nand_chip), - msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20)); + timeout = 400; + timeout = wait_for_completion_timeout(&tmio->comp, + msecs_to_jiffies(timeout)); if (unlikely(!tmio_nand_dev_ready(nand_chip))) { tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); - dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n", - nand_chip->state == FL_ERASING ? "erase" : "program", - nand_chip->state == FL_ERASING ? 
400 : 20); + dev_warn(&tmio->dev->dev, "still busy after 400 ms\n"); } else if (unlikely(!timeout)) { tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); @@ -378,6 +375,8 @@ static int tmio_probe(struct platform_device *dev) if (!tmio) return -ENOMEM; + init_completion(&tmio->comp); + tmio->dev = dev; platform_set_drvdata(dev, tmio); diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index e4141c20947a..0b49d8264bef 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -12,6 +12,8 @@ #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4) #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4) +#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0 + static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -81,11 +83,83 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } +static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + region->offset = 64; + region->length = 64; + + return 0; +} + +static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + /* Reserve 1 bytes for the BBM. */ + region->offset = 1; + region->length = 63; + + return 0; +} + +static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, + u8 status) +{ + u8 status2; + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4UEXXG_REG_STATUS2, + &status2); + int ret; + + switch (status & STATUS_ECC_MASK) { + case STATUS_ECC_NO_BITFLIPS: + return 0; + + case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: + /* + * Read status2 register to determine a more fine grained + * bit error status + */ + ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + /* + * 4 ... 
7 bits are flipped (1..4 can't be detected, so + * report the maximum of 4 in this case + */ + /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */ + return ((status & STATUS_ECC_MASK) >> 2) | + ((status2 & STATUS_ECC_MASK) >> 4); + + case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: + return 8; + + case STATUS_ECC_UNCOR_ERROR: + return -EBADMSG; + + default: + break; + } + + return -EINVAL; +} + static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = { .ecc = gd5fxgq4xa_ooblayout_ecc, .free = gd5fxgq4xa_ooblayout_free, }; +static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = { + .ecc = gd5fxgq4uexxg_ooblayout_ecc, + .free = gd5fxgq4uexxg_ooblayout_free, +}; + static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO("GD5F1GQ4xA", 0xF1, NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1), @@ -114,6 +188,15 @@ static const struct spinand_info gigadevice_spinand_table[] = { 0, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), + SPINAND_INFO("GD5F1GQ4UExxG", 0xd1, + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout, + gd5fxgq4uexxg_ecc_get_status)), }; static int gigadevice_spinand_detect(struct spinand_device *spinand) diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 98f6b9c4b684..d16b57081c95 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -10,6 +10,7 @@ #include <linux/mtd/spinand.h> #define SPINAND_MFR_MACRONIX 0xC2 +#define MACRONIX_ECCSR_MASK 0x0F static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) SPI_MEM_OP_DUMMY(1, 1), SPI_MEM_OP_DATA_IN(1, eccsr, 1)); - return spi_mem_exec_op(spinand->spimem, &op); + int ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + *eccsr &= MACRONIX_ECCSR_MASK; + return 0; } static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 081265557e70..db8021da45b5 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -25,19 +25,19 @@ static SPINAND_OP_VARIANTS(write_cache_variants, static SPINAND_OP_VARIANTS(update_cache_variants, SPINAND_PROG_LOAD(false, 0, NULL, 0)); -static int tc58cvg2s0h_ooblayout_ecc(struct mtd_info *mtd, int section, +static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { - if (section > 7) + if (section > 0) return -ERANGE; - region->offset = 128 + 16 * section; - region->length = 16; + region->offset = mtd->oobsize / 2; + region->length = mtd->oobsize / 2; return 0; } -static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section, +static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { if (section > 0) @@ -45,17 +45,17 @@ static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section, /* 2 bytes reserved for BBM */ region->offset = 2; - region->length = 126; + region->length = (mtd->oobsize / 2) - 2; return 0; } -static const struct mtd_ooblayout_ops tc58cvg2s0h_ooblayout = { - .ecc = tc58cvg2s0h_ooblayout_ecc, - .free = tc58cvg2s0h_ooblayout_free, +static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = { + .ecc = tc58cxgxsx_ooblayout_ecc, + .free = 
tc58cxgxsx_ooblayout_free, }; -static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand, +static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand, u8 status) { struct nand_device *nand = spinand_to_nand(spinand); @@ -94,15 +94,66 @@ static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand, } static const struct spinand_info toshiba_spinand_table[] = { - SPINAND_INFO("TC58CVG2S0H", 0xCD, + /* 3.3V 1Gb */ + SPINAND_INFO("TC58CVG0S3", 0xC2, + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 3.3V 2Gb */ + SPINAND_INFO("TC58CVG1S3", 0xCB, + NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 3.3V 4Gb */ + SPINAND_INFO("TC58CVG2S0", 0xCD, + NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 1.8V 1Gb */ + SPINAND_INFO("TC58CYG0S3", 0xB2, + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 1.8V 2Gb */ + SPINAND_INFO("TC58CYG1S3", 0xBB, + NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 1.8V 4Gb */ + SPINAND_INFO("TC58CYG2S0", 0xBD, NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&tc58cvg2s0h_ooblayout, - tc58cvg2s0h_ecc_get_status)), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), }; static int toshiba_spinand_detect(struct spinand_device *spinand) diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 44fe8018733c..dab986691267 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -7,14 +7,6 @@ menuconfig MTD_SPI_NOR if MTD_SPI_NOR -config MTD_MT81xx_NOR - tristate "Mediatek MT81xx SPI NOR flash controller" - depends on HAS_IOMEM - help - This enables access to SPI NOR flash, using MT81xx SPI NOR flash - controller. This controller does not support generic SPI BUS, it only - supports SPI NOR Flash. - config MTD_SPI_NOR_USE_4K_SECTORS bool "Use small 4096 B erase sectors" default y @@ -50,15 +42,6 @@ config SPI_CADENCE_QUADSPI device with a Cadence QSPI controller and want to access the Flash as an MTD device. -config SPI_FSL_QUADSPI - tristate "Freescale Quad SPI controller" - depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST - depends on HAS_IOMEM - help - This enables support for the Quad SPI controller in master mode. - This controller does not support generic SPI. It only supports - SPI NOR. 
- config SPI_HISI_SFC tristate "Hisilicon SPI-NOR Flash Controller(SFC)" depends on ARCH_HISI || COMPILE_TEST @@ -66,6 +49,14 @@ config SPI_HISI_SFC help This enables support for hisilicon SPI-NOR flash controller. +config SPI_MTK_QUADSPI + tristate "MediaTek Quad SPI controller" + depends on HAS_IOMEM + help + This enables support for the Quad SPI controller in master mode. + This controller does not support generic SPI. It only supports + SPI NOR. + config SPI_NXP_SPIFI tristate "NXP SPI Flash Interface (SPIFI)" depends on OF && (ARCH_LPC18XX || COMPILE_TEST) diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index a552efd22958..189a15cca3ec 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -2,9 +2,8 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o -obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o -obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o +obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 04cedd3a2bf6..792628750eec 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -44,6 +44,12 @@ /* Quirks */ #define CQSPI_NEEDS_WR_DELAY BIT(0) +/* Capabilities mask */ +#define CQSPI_BASE_HWCAPS_MASK \ + (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \ + SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \ + SNOR_HWCAPS_PP) + struct cqspi_st; struct cqspi_flash_pdata { @@ -93,6 +99,11 @@ struct cqspi_st { struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; }; +struct cqspi_driver_platdata { + u32 hwcaps_mask; + u8 quirks; +}; + /* Operation timeout value */ #define CQSPI_TIMEOUT_MS 500 #define CQSPI_READ_TIMEOUT_MS 10 @@ -101,6 +112,7 @@ struct cqspi_st { #define CQSPI_INST_TYPE_SINGLE 0 #define CQSPI_INST_TYPE_DUAL 1 #define CQSPI_INST_TYPE_QUAD 2 +#define CQSPI_INST_TYPE_OCTAL 3 #define CQSPI_DUMMY_CLKS_PER_BYTE 8 #define CQSPI_DUMMY_BYTES_MAX 4 @@ -418,9 +430,10 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, void __iomem *reg_base = cqspi->iobase; unsigned int reg; unsigned int data; + u32 write_len; int ret; - if (n_tx > 4 || (n_tx && !txbuf)) { + if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { dev_err(nor->dev, "Invalid input argument, cmdlen %d txbuf 0x%p\n", n_tx, txbuf); @@ -433,10 +446,18 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; data = 0; - memcpy(&data, txbuf, n_tx); + write_len = (n_tx > 4) ? 
4 : n_tx; + memcpy(&data, txbuf, write_len); + txbuf += write_len; writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); - } + if (n_tx > 4) { + data = 0; + write_len = n_tx - 4; + memcpy(&data, txbuf, write_len); + writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER); + } + } ret = cqspi_exec_flash_cmd(cqspi, reg); return ret; } @@ -911,6 +932,9 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read) case SNOR_PROTO_1_1_4: f_pdata->data_width = CQSPI_INST_TYPE_QUAD; break; + case SNOR_PROTO_1_1_8: + f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; + break; default: return -EINVAL; } @@ -1213,21 +1237,23 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi) static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) { - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ | - SNOR_HWCAPS_READ_FAST | - SNOR_HWCAPS_READ_1_1_2 | - SNOR_HWCAPS_READ_1_1_4 | - SNOR_HWCAPS_PP, - }; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; + const struct cqspi_driver_platdata *ddata; + struct spi_nor_hwcaps hwcaps; struct cqspi_flash_pdata *f_pdata; struct spi_nor *nor; struct mtd_info *mtd; unsigned int cs; int i, ret; + ddata = of_device_get_match_data(dev); + if (!ddata) { + dev_err(dev, "Couldn't find driver data\n"); + return -EINVAL; + } + hwcaps.mask = ddata->hwcaps_mask; + /* Get flash device data */ for_each_available_child_of_node(dev->of_node, np) { ret = of_property_read_u32(np, "reg", &cs); @@ -1310,7 +1336,7 @@ static int cqspi_probe(struct platform_device *pdev) struct cqspi_st *cqspi; struct resource *res; struct resource *res_ahb; - unsigned long data; + const struct cqspi_driver_platdata *ddata; int ret; int irq; @@ -1377,8 +1403,8 @@ static int cqspi_probe(struct platform_device *pdev) } cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); - data = (unsigned long)of_device_get_match_data(dev); - if (data & CQSPI_NEEDS_WR_DELAY) + ddata = of_device_get_match_data(dev); + if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY)) cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, cqspi->master_ref_clk_hz); @@ -1460,14 +1486,32 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = { #define CQSPI_DEV_PM_OPS NULL #endif +static const struct cqspi_driver_platdata cdns_qspi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, +}; + +static const struct cqspi_driver_platdata k2g_qspi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, + .quirks = CQSPI_NEEDS_WR_DELAY, +}; + +static const struct cqspi_driver_platdata am654_ospi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8, + .quirks = CQSPI_NEEDS_WR_DELAY, +}; + static const struct of_device_id cqspi_dt_ids[] = { { .compatible = "cdns,qspi-nor", - .data = (void *)0, + .data = &cdns_qspi, }, { .compatible = "ti,k2g-qspi", - .data = (void *)CQSPI_NEEDS_WR_DELAY, + .data = &k2g_qspi, + }, + { + .compatible = "ti,am654-ospi", + .data = &am654_ospi, }, { /* end of table */ } }; diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c deleted file mode 100644 index 1ff3430f82c8..000000000000 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ /dev/null @@ -1,1224 +0,0 @@ -/* - * Freescale QuadSPI driver. - * - * Copyright (C) 2013 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/platform_device.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/timer.h> -#include <linux/jiffies.h> -#include <linux/completion.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/partitions.h> -#include <linux/mtd/spi-nor.h> -#include <linux/mutex.h> -#include <linux/pm_qos.h> -#include <linux/sizes.h> - -/* Controller needs driver to swap endian */ -#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0) -/* Controller needs 4x internal clock */ -#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1) -/* - * TKT253890, Controller needs driver to fill txfifo till 16 byte to - * trigger data transfer even though extern data will not transferred. - */ -#define QUADSPI_QUIRK_TKT253890 (1 << 2) -/* Controller cannot wake up from wait mode, TKT245618 */ -#define QUADSPI_QUIRK_TKT245618 (1 << 3) - -/* The registers */ -#define QUADSPI_MCR 0x00 -#define QUADSPI_MCR_RESERVED_SHIFT 16 -#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT) -#define QUADSPI_MCR_MDIS_SHIFT 14 -#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT) -#define QUADSPI_MCR_CLR_TXF_SHIFT 11 -#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT) -#define QUADSPI_MCR_CLR_RXF_SHIFT 10 -#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT) -#define QUADSPI_MCR_DDR_EN_SHIFT 7 -#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT) -#define QUADSPI_MCR_END_CFG_SHIFT 2 -#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT) -#define QUADSPI_MCR_SWRSTHD_SHIFT 1 -#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT) -#define QUADSPI_MCR_SWRSTSD_SHIFT 0 -#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT) - -#define QUADSPI_IPCR 0x08 -#define QUADSPI_IPCR_SEQID_SHIFT 24 -#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT) - -#define QUADSPI_BUF0CR 0x10 -#define QUADSPI_BUF1CR 0x14 -#define QUADSPI_BUF2CR 0x18 -#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe - -#define QUADSPI_BUF3CR 0x1c -#define QUADSPI_BUF3CR_ALLMST_SHIFT 31 -#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) -#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8 -#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT) - -#define QUADSPI_BFGENCR 0x20 -#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16 -#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT)) -#define QUADSPI_BFGENCR_SEQID_SHIFT 12 -#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT) - -#define QUADSPI_BUF0IND 0x30 -#define QUADSPI_BUF1IND 0x34 -#define QUADSPI_BUF2IND 0x38 -#define QUADSPI_SFAR 0x100 - -#define QUADSPI_SMPR 0x108 -#define QUADSPI_SMPR_DDRSMP_SHIFT 16 -#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT) -#define QUADSPI_SMPR_FSDLY_SHIFT 6 -#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT) -#define QUADSPI_SMPR_FSPHS_SHIFT 5 -#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT) -#define QUADSPI_SMPR_HSENA_SHIFT 0 -#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT) - -#define QUADSPI_RBSR 0x10c -#define QUADSPI_RBSR_RDBFL_SHIFT 8 -#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT) - -#define QUADSPI_RBCT 0x110 -#define QUADSPI_RBCT_WMRK_MASK 0x1F -#define QUADSPI_RBCT_RXBRD_SHIFT 8 -#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << 
QUADSPI_RBCT_RXBRD_SHIFT) - -#define QUADSPI_TBSR 0x150 -#define QUADSPI_TBDR 0x154 -#define QUADSPI_SR 0x15c -#define QUADSPI_SR_IP_ACC_SHIFT 1 -#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT) -#define QUADSPI_SR_AHB_ACC_SHIFT 2 -#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT) - -#define QUADSPI_FR 0x160 -#define QUADSPI_FR_TFF_MASK 0x1 - -#define QUADSPI_SFA1AD 0x180 -#define QUADSPI_SFA2AD 0x184 -#define QUADSPI_SFB1AD 0x188 -#define QUADSPI_SFB2AD 0x18c -#define QUADSPI_RBDR 0x200 - -#define QUADSPI_LUTKEY 0x300 -#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0 - -#define QUADSPI_LCKCR 0x304 -#define QUADSPI_LCKER_LOCK 0x1 -#define QUADSPI_LCKER_UNLOCK 0x2 - -#define QUADSPI_RSER 0x164 -#define QUADSPI_RSER_TFIE (0x1 << 0) - -#define QUADSPI_LUT_BASE 0x310 - -/* - * The definition of the LUT register shows below: - * - * --------------------------------------------------- - * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | - * --------------------------------------------------- - */ -#define OPRND0_SHIFT 0 -#define PAD0_SHIFT 8 -#define INSTR0_SHIFT 10 -#define OPRND1_SHIFT 16 - -/* Instruction set for the LUT register. */ -#define LUT_STOP 0 -#define LUT_CMD 1 -#define LUT_ADDR 2 -#define LUT_DUMMY 3 -#define LUT_MODE 4 -#define LUT_MODE2 5 -#define LUT_MODE4 6 -#define LUT_FSL_READ 7 -#define LUT_FSL_WRITE 8 -#define LUT_JMP_ON_CS 9 -#define LUT_ADDR_DDR 10 -#define LUT_MODE_DDR 11 -#define LUT_MODE2_DDR 12 -#define LUT_MODE4_DDR 13 -#define LUT_FSL_READ_DDR 14 -#define LUT_FSL_WRITE_DDR 15 -#define LUT_DATA_LEARN 16 - -/* - * The PAD definitions for LUT register. - * - * The pad stands for the lines number of IO[0:3]. - * For example, the Quad read need four IO lines, so you should - * set LUT_PAD4 which means we use four IO lines. - */ -#define LUT_PAD1 0 -#define LUT_PAD2 1 -#define LUT_PAD4 2 - -/* Oprands for the LUT register. */ -#define ADDR24BIT 0x18 -#define ADDR32BIT 0x20 - -/* Macros for constructing the LUT register. */ -#define LUT0(ins, pad, opr) \ - (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \ - ((LUT_##ins) << INSTR0_SHIFT)) - -#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT) - -/* other macros for LUT register. */ -#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4) -#define QUADSPI_LUT_NUM 64 - -/* SEQID -- we can have 16 seqids at most. 
*/ -#define SEQID_READ 0 -#define SEQID_WREN 1 -#define SEQID_WRDI 2 -#define SEQID_RDSR 3 -#define SEQID_SE 4 -#define SEQID_CHIP_ERASE 5 -#define SEQID_PP 6 -#define SEQID_RDID 7 -#define SEQID_WRSR 8 -#define SEQID_RDCR 9 -#define SEQID_EN4B 10 -#define SEQID_BRWR 11 - -#define QUADSPI_MIN_IOMAP SZ_4M - -enum fsl_qspi_devtype { - FSL_QUADSPI_VYBRID, - FSL_QUADSPI_IMX6SX, - FSL_QUADSPI_IMX7D, - FSL_QUADSPI_IMX6UL, - FSL_QUADSPI_LS1021A, - FSL_QUADSPI_LS2080A, -}; - -struct fsl_qspi_devtype_data { - enum fsl_qspi_devtype devtype; - int rxfifo; - int txfifo; - int ahb_buf_size; - int driver_data; -}; - -static const struct fsl_qspi_devtype_data vybrid_data = { - .devtype = FSL_QUADSPI_VYBRID, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN, -}; - -static const struct fsl_qspi_devtype_data imx6sx_data = { - .devtype = FSL_QUADSPI_IMX6SX, - .rxfifo = 128, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_4X_INT_CLK - | QUADSPI_QUIRK_TKT245618, -}; - -static const struct fsl_qspi_devtype_data imx7d_data = { - .devtype = FSL_QUADSPI_IMX7D, - .rxfifo = 512, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890 - | QUADSPI_QUIRK_4X_INT_CLK, -}; - -static const struct fsl_qspi_devtype_data imx6ul_data = { - .devtype = FSL_QUADSPI_IMX6UL, - .rxfifo = 128, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890 - | QUADSPI_QUIRK_4X_INT_CLK, -}; - -static struct fsl_qspi_devtype_data ls1021a_data = { - .devtype = FSL_QUADSPI_LS1021A, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = 0, -}; - -static const struct fsl_qspi_devtype_data ls2080a_data = { - .devtype = FSL_QUADSPI_LS2080A, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890, -}; - - -#define FSL_QSPI_MAX_CHIP 4 -struct fsl_qspi { - struct spi_nor nor[FSL_QSPI_MAX_CHIP]; - void __iomem *iobase; - void __iomem *ahb_addr; - u32 memmap_phy; - u32 memmap_offs; - u32 memmap_len; - struct clk *clk, *clk_en; - struct device *dev; - struct completion c; - const struct fsl_qspi_devtype_data *devtype_data; - u32 nor_size; - u32 nor_num; - u32 clk_rate; - unsigned int chip_base_addr; /* We may support two chips. */ - bool has_second_chip; - bool big_endian; - struct mutex lock; - struct pm_qos_request pm_qos_req; -}; - -static inline int needs_swap_endian(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN; -} - -static inline int needs_4x_clock(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK; -} - -static inline int needs_fill_txfifo(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890; -} - -static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; -} - -/* - * R/W functions for big- or little-endian registers: - * The qSPI controller's endian is independent of the CPU core's endian. - * So far, although the CPU core is little-endian but the qSPI have two - * versions for big-endian and little-endian. - */ -static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) -{ - if (q->big_endian) - iowrite32be(val, addr); - else - iowrite32(val, addr); -} - -static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) -{ - if (q->big_endian) - return ioread32be(addr); - else - return ioread32(addr); -} - -/* - * An IC bug makes us to re-arrange the 32-bit data. 
- * The following chips, such as IMX6SLX, have fixed this bug. - */ -static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) -{ - return needs_swap_endian(q) ? __swab32(a) : a; -} - -static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) -{ - qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); -} - -static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) -{ - qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); -} - -static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) -{ - struct fsl_qspi *q = dev_id; - u32 reg; - - /* clear interrupt */ - reg = qspi_readl(q, q->iobase + QUADSPI_FR); - qspi_writel(q, reg, q->iobase + QUADSPI_FR); - - if (reg & QUADSPI_FR_TFF_MASK) - complete(&q->c); - - dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg); - return IRQ_HANDLED; -} - -static void fsl_qspi_init_lut(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - int rxfifo = q->devtype_data->rxfifo; - u32 lut_base; - int i; - - struct spi_nor *nor = &q->nor[0]; - u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT; - u8 read_op = nor->read_opcode; - u8 read_dm = nor->read_dummy; - - fsl_qspi_unlock_lut(q); - - /* Clear all the LUT table */ - for (i = 0; i < QUADSPI_LUT_NUM; i++) - qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4); - - /* Read */ - lut_base = SEQID_READ * 4; - - qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | - LUT1(FSL_READ, PAD4, rxfifo), - base + QUADSPI_LUT(lut_base + 1)); - - /* Write enable */ - lut_base = SEQID_WREN * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN), - base + QUADSPI_LUT(lut_base)); - - /* Page Program */ - lut_base = SEQID_PP * 4; - - qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) | - LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0), - base + QUADSPI_LUT(lut_base + 1)); - - /* Read Status */ - lut_base = SEQID_RDSR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) | - LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Erase a sector */ - lut_base = SEQID_SE * 4; - - qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) | - LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - - /* Erase the whole chip */ - lut_base = SEQID_CHIP_ERASE * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), - base + QUADSPI_LUT(lut_base)); - - /* READ ID */ - lut_base = SEQID_RDID * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) | - LUT1(FSL_READ, PAD1, 0x8), - base + QUADSPI_LUT(lut_base)); - - /* Write Register */ - lut_base = SEQID_WRSR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) | - LUT1(FSL_WRITE, PAD1, 0x2), - base + QUADSPI_LUT(lut_base)); - - /* Read Configuration Register */ - lut_base = SEQID_RDCR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) | - LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Write disable */ - lut_base = SEQID_WRDI * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI), - base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Micron) */ - lut_base = SEQID_EN4B * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B), - base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Spansion) */ - lut_base = SEQID_BRWR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), - base + QUADSPI_LUT(lut_base)); - - 
fsl_qspi_lock_lut(q); -} - -/* Get the SEQID for the command */ -static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) -{ - switch (cmd) { - case SPINOR_OP_READ_1_1_4: - case SPINOR_OP_READ_1_1_4_4B: - return SEQID_READ; - case SPINOR_OP_WREN: - return SEQID_WREN; - case SPINOR_OP_WRDI: - return SEQID_WRDI; - case SPINOR_OP_RDSR: - return SEQID_RDSR; - case SPINOR_OP_SE: - return SEQID_SE; - case SPINOR_OP_CHIP_ERASE: - return SEQID_CHIP_ERASE; - case SPINOR_OP_PP: - return SEQID_PP; - case SPINOR_OP_RDID: - return SEQID_RDID; - case SPINOR_OP_WRSR: - return SEQID_WRSR; - case SPINOR_OP_RDCR: - return SEQID_RDCR; - case SPINOR_OP_EN4B: - return SEQID_EN4B; - case SPINOR_OP_BRWR: - return SEQID_BRWR; - default: - if (cmd == q->nor[0].erase_opcode) - return SEQID_SE; - dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd); - break; - } - return -EINVAL; -} - -static int -fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) -{ - void __iomem *base = q->iobase; - int seqid; - u32 reg, reg2; - int err; - - init_completion(&q->c); - dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n", - q->chip_base_addr, addr, len, cmd); - - /* save the reg */ - reg = qspi_readl(q, base + QUADSPI_MCR); - - qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, - base + QUADSPI_SFAR); - qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, - base + QUADSPI_RBCT); - qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); - - do { - reg2 = qspi_readl(q, base + QUADSPI_SR); - if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { - udelay(1); - dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); - continue; - } - break; - } while (1); - - /* trigger the LUT now */ - seqid = fsl_qspi_get_seqid(q, cmd); - if (seqid < 0) - return seqid; - - qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, - base + QUADSPI_IPCR); - - /* Wait for the interrupt. */ - if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) { - dev_err(q->dev, - "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", - cmd, addr, qspi_readl(q, base + QUADSPI_FR), - qspi_readl(q, base + QUADSPI_SR)); - err = -ETIMEDOUT; - } else { - err = 0; - } - - /* restore the MCR */ - qspi_writel(q, reg, base + QUADSPI_MCR); - - return err; -} - -/* Read out the data from the QUADSPI_RBDR buffer registers. */ -static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf) -{ - u32 tmp; - int i = 0; - - while (len > 0) { - tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4); - tmp = fsl_qspi_endian_xchg(q, tmp); - dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", - q->chip_base_addr, tmp); - - if (len >= 4) { - *((u32 *)rxbuf) = tmp; - rxbuf += 4; - } else { - memcpy(rxbuf, &tmp, len); - break; - } - - len -= 4; - i++; - } -} - -/* - * If we have changed the content of the flash by writing or erasing, - * we need to invalidate the AHB buffer. If we do not do so, we may read out - * the wrong data. The spec tells us reset the AHB domain and Serial Flash - * domain at the same time. - */ -static inline void fsl_qspi_invalid(struct fsl_qspi *q) -{ - u32 reg; - - reg = qspi_readl(q, q->iobase + QUADSPI_MCR); - reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); - - /* - * The minimum delay : 1 AHB + 2 SFCK clocks. - * Delay 1 us is enough. 
- */ - udelay(1); - - reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); -} - -static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, - u8 opcode, unsigned int to, u32 *txbuf, - unsigned count) -{ - int ret, i, j; - u32 tmp; - - dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n", - q->chip_base_addr, to, count); - - /* clear the TX FIFO. */ - tmp = qspi_readl(q, q->iobase + QUADSPI_MCR); - qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); - - /* fill the TX data to the FIFO */ - for (j = 0, i = ((count + 3) / 4); j < i; j++) { - tmp = fsl_qspi_endian_xchg(q, *txbuf); - qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - txbuf++; - } - - /* fill the TXFIFO upto 16 bytes for i.MX7d */ - if (needs_fill_txfifo(q)) - for (; i < 4; i++) - qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - - /* Trigger it */ - ret = fsl_qspi_runcmd(q, opcode, to, count); - - if (ret == 0) - return count; - - return ret; -} - -static void fsl_qspi_set_map_addr(struct fsl_qspi *q) -{ - int nor_size = q->nor_size; - void __iomem *base = q->iobase; - - qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); - qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); - qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); - qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); -} - -/* - * There are two different ways to read out the data from the flash: - * the "IP Command Read" and the "AHB Command Read". - * - * The IC guy suggests we use the "AHB Command Read" which is faster - * then the "IP Command Read". (What's more is that there is a bug in - * the "IP Command Read" in the Vybrid.) - * - * After we set up the registers for the "AHB Command Read", we can use - * the memcpy to read the data directly. A "missed" access to the buffer - * causes the controller to clear the buffer, and use the sequence pointed - * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash. - */ -static int fsl_qspi_init_ahb_read(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - int seqid; - - /* AHB configuration for access buffer 0/1/2 .*/ - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); - /* - * Set ADATSZ with the maximum AHB buffer size to improve the - * read performance. - */ - qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | - ((q->devtype_data->ahb_buf_size / 8) - << QUADSPI_BUF3CR_ADATSZ_SHIFT), - base + QUADSPI_BUF3CR); - - /* We only use the buffer3 */ - qspi_writel(q, 0, base + QUADSPI_BUF0IND); - qspi_writel(q, 0, base + QUADSPI_BUF1IND); - qspi_writel(q, 0, base + QUADSPI_BUF2IND); - - /* Set the default lut sequence for AHB Read. 
*/ - seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); - if (seqid < 0) - return seqid; - - qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, - q->iobase + QUADSPI_BFGENCR); - - return 0; -} - -/* This function was used to prepare and enable QSPI clock */ -static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) -{ - int ret; - - ret = clk_prepare_enable(q->clk_en); - if (ret) - return ret; - - ret = clk_prepare_enable(q->clk); - if (ret) { - clk_disable_unprepare(q->clk_en); - return ret; - } - - if (needs_wakeup_wait_mode(q)) - pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); - - return 0; -} - -/* This function was used to disable and unprepare QSPI clock */ -static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) -{ - if (needs_wakeup_wait_mode(q)) - pm_qos_remove_request(&q->pm_qos_req); - - clk_disable_unprepare(q->clk); - clk_disable_unprepare(q->clk_en); - -} - -/* We use this function to do some basic init for spi_nor_scan(). */ -static int fsl_qspi_nor_setup(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - u32 reg; - int ret; - - /* disable and unprepare clock to avoid glitch pass to controller */ - fsl_qspi_clk_disable_unprep(q); - - /* the default frequency, we will change it in the future. */ - ret = clk_set_rate(q->clk, 66000000); - if (ret) - return ret; - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - /* Reset the module */ - qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, - base + QUADSPI_MCR); - udelay(1); - - /* Init the LUT table. */ - fsl_qspi_init_lut(q); - - /* Disable the module */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, - base + QUADSPI_MCR); - - reg = qspi_readl(q, base + QUADSPI_SMPR); - qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK - | QUADSPI_SMPR_FSPHS_MASK - | QUADSPI_SMPR_HSENA_MASK - | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); - - /* Enable the module */ - qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, - base + QUADSPI_MCR); - - /* clear all interrupt status */ - qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); - - /* enable the interrupt */ - qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); - - return 0; -} - -static int fsl_qspi_nor_setup_last(struct fsl_qspi *q) -{ - unsigned long rate = q->clk_rate; - int ret; - - if (needs_4x_clock(q)) - rate *= 4; - - /* disable and unprepare clock to avoid glitch pass to controller */ - fsl_qspi_clk_disable_unprep(q); - - ret = clk_set_rate(q->clk, rate); - if (ret) - return ret; - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - /* Init the LUT table again. 
*/ - fsl_qspi_init_lut(q); - - /* Init for AHB read */ - return fsl_qspi_init_ahb_read(q); -} - -static const struct of_device_id fsl_qspi_dt_ids[] = { - { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, }, - { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, }, - { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, }, - { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, }, - { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, }, - { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); - -static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor) -{ - q->chip_base_addr = q->nor_size * (nor - q->nor); -} - -static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) -{ - int ret; - struct fsl_qspi *q = nor->priv; - - ret = fsl_qspi_runcmd(q, opcode, 0, len); - if (ret) - return ret; - - fsl_qspi_read_data(q, len, buf); - return 0; -} - -static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - if (!buf) { - ret = fsl_qspi_runcmd(q, opcode, 0, 1); - if (ret) - return ret; - - if (opcode == SPINOR_OP_CHIP_ERASE) - fsl_qspi_invalid(q); - - } else if (len > 0) { - ret = fsl_qspi_nor_write(q, nor, opcode, 0, - (u32 *)buf, len); - if (ret > 0) - return 0; - } else { - dev_err(q->dev, "invalid cmd %d\n", opcode); - ret = -EINVAL; - } - - return ret; -} - -static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, - size_t len, const u_char *buf) -{ - struct fsl_qspi *q = nor->priv; - ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, - (u32 *)buf, len); - - /* invalid the data in the AHB buffer. */ - fsl_qspi_invalid(q); - return ret; -} - -static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, - size_t len, u_char *buf) -{ - struct fsl_qspi *q = nor->priv; - u8 cmd = nor->read_opcode; - - /* if necessary,ioremap buffer before AHB read, */ - if (!q->ahb_addr) { - q->memmap_offs = q->chip_base_addr + from; - q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP; - - q->ahb_addr = ioremap_nocache( - q->memmap_phy + q->memmap_offs, - q->memmap_len); - if (!q->ahb_addr) { - dev_err(q->dev, "ioremap failed\n"); - return -ENOMEM; - } - /* ioremap if the data requested is out of range */ - } else if (q->chip_base_addr + from < q->memmap_offs - || q->chip_base_addr + from + len > - q->memmap_offs + q->memmap_len) { - iounmap(q->ahb_addr); - - q->memmap_offs = q->chip_base_addr + from; - q->memmap_len = len > QUADSPI_MIN_IOMAP ? 
len : QUADSPI_MIN_IOMAP; - q->ahb_addr = ioremap_nocache( - q->memmap_phy + q->memmap_offs, - q->memmap_len); - if (!q->ahb_addr) { - dev_err(q->dev, "ioremap failed\n"); - return -ENOMEM; - } - } - - dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n", - cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - - /* Read out the data directly from the AHB buffer.*/ - memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - - return len; -} - -static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n", - nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs); - - ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0); - if (ret) - return ret; - - fsl_qspi_invalid(q); - return 0; -} - -static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - mutex_lock(&q->lock); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - goto err_mutex; - - fsl_qspi_set_base_addr(q, nor); - return 0; - -err_mutex: - mutex_unlock(&q->lock); - return ret; -} - -static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct fsl_qspi *q = nor->priv; - - fsl_qspi_clk_disable_unprep(q); - mutex_unlock(&q->lock); -} - -static int fsl_qspi_probe(struct platform_device *pdev) -{ - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ_1_1_4 | - SNOR_HWCAPS_PP, - }; - struct device_node *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - struct fsl_qspi *q; - struct resource *res; - struct spi_nor *nor; - struct mtd_info *mtd; - int ret, i = 0; - - q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); - if (!q) - return -ENOMEM; - - q->nor_num = of_get_child_count(dev->of_node); - if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP) - return -ENODEV; - - q->dev = dev; - q->devtype_data = of_device_get_match_data(dev); - if (!q->devtype_data) - return -ENODEV; - platform_set_drvdata(pdev, q); - - /* find the resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); - q->iobase = devm_ioremap_resource(dev, res); - if (IS_ERR(q->iobase)) - return PTR_ERR(q->iobase); - - q->big_endian = of_property_read_bool(np, "big-endian"); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "QuadSPI-memory"); - if (!devm_request_mem_region(dev, res->start, resource_size(res), - res->name)) { - dev_err(dev, "can't request region for resource %pR\n", res); - return -EBUSY; - } - - q->memmap_phy = res->start; - - /* find the clocks */ - q->clk_en = devm_clk_get(dev, "qspi_en"); - if (IS_ERR(q->clk_en)) - return PTR_ERR(q->clk_en); - - q->clk = devm_clk_get(dev, "qspi"); - if (IS_ERR(q->clk)) - return PTR_ERR(q->clk); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - goto clk_failed; - } - - /* find the irq */ - ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "failed to get the irq: %d\n", ret); - goto irq_failed; - } - - ret = devm_request_irq(dev, ret, - fsl_qspi_irq_handler, 0, pdev->name, q); - if (ret) { - dev_err(dev, "failed to request irq: %d\n", ret); - goto irq_failed; - } - - ret = fsl_qspi_nor_setup(q); - if (ret) - goto irq_failed; - - if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) - q->has_second_chip = true; - - mutex_init(&q->lock); - - /* iterate the subnodes. 
*/ - for_each_available_child_of_node(dev->of_node, np) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - - nor = &q->nor[i]; - mtd = &nor->mtd; - - nor->dev = dev; - spi_nor_set_flash_node(nor, np); - nor->priv = q; - - if (q->nor_num > 1 && !mtd->name) { - int spiflash_idx; - - ret = of_property_read_u32(np, "reg", &spiflash_idx); - if (!ret) { - mtd->name = devm_kasprintf(dev, GFP_KERNEL, - "%s-%d", - dev_name(dev), - spiflash_idx); - if (!mtd->name) { - ret = -ENOMEM; - goto mutex_failed; - } - } else { - dev_warn(dev, "reg property is missing\n"); - } - } - - /* fill the hooks */ - nor->read_reg = fsl_qspi_read_reg; - nor->write_reg = fsl_qspi_write_reg; - nor->read = fsl_qspi_read; - nor->write = fsl_qspi_write; - nor->erase = fsl_qspi_erase; - - nor->prepare = fsl_qspi_prep; - nor->unprepare = fsl_qspi_unprep; - - ret = of_property_read_u32(np, "spi-max-frequency", - &q->clk_rate); - if (ret < 0) - goto mutex_failed; - - /* set the chip address for READID */ - fsl_qspi_set_base_addr(q, nor); - - ret = spi_nor_scan(nor, NULL, &hwcaps); - if (ret) - goto mutex_failed; - - ret = mtd_device_register(mtd, NULL, 0); - if (ret) - goto mutex_failed; - - /* Set the correct NOR size now. */ - if (q->nor_size == 0) { - q->nor_size = mtd->size; - - /* Map the SPI NOR to accessiable address */ - fsl_qspi_set_map_addr(q); - } - - /* - * The TX FIFO is 64 bytes in the Vybrid, but the Page Program - * may writes 265 bytes per time. The write is working in the - * unit of the TX FIFO, not in the unit of the SPI NOR's page - * size. - * - * So shrink the spi_nor->page_size if it is larger then the - * TX FIFO. - */ - if (nor->page_size > q->devtype_data->txfifo) - nor->page_size = q->devtype_data->txfifo; - - i++; - } - - /* finish the rest init. 
*/ - ret = fsl_qspi_nor_setup_last(q); - if (ret) - goto last_init_failed; - - fsl_qspi_clk_disable_unprep(q); - return 0; - -last_init_failed: - for (i = 0; i < q->nor_num; i++) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - mtd_device_unregister(&q->nor[i].mtd); - } -mutex_failed: - mutex_destroy(&q->lock); -irq_failed: - fsl_qspi_clk_disable_unprep(q); -clk_failed: - dev_err(dev, "Freescale QuadSPI probe failed\n"); - return ret; -} - -static int fsl_qspi_remove(struct platform_device *pdev) -{ - struct fsl_qspi *q = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < q->nor_num; i++) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - mtd_device_unregister(&q->nor[i].mtd); - } - - /* disable the hardware */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); - qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); - - mutex_destroy(&q->lock); - - if (q->ahb_addr) - iounmap(q->ahb_addr); - - return 0; -} - -static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state) -{ - return 0; -} - -static int fsl_qspi_resume(struct platform_device *pdev) -{ - int ret; - struct fsl_qspi *q = platform_get_drvdata(pdev); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - fsl_qspi_nor_setup(q); - fsl_qspi_set_map_addr(q); - fsl_qspi_nor_setup_last(q); - - fsl_qspi_clk_disable_unprep(q); - - return 0; -} - -static struct platform_driver fsl_qspi_driver = { - .driver = { - .name = "fsl-quadspi", - .of_match_table = fsl_qspi_dt_ids, - }, - .probe = fsl_qspi_probe, - .remove = fsl_qspi_remove, - .suspend = fsl_qspi_suspend, - .resume = fsl_qspi_resume, -}; -module_platform_driver(fsl_qspi_driver); - -MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); -MODULE_AUTHOR("Freescale Semiconductor Inc."); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index 5442993b71ff..d9eed6844ba1 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -431,7 +431,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor, struct device_node *flash_node) { const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ_FAST | + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_PP, }; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 6e13bbd1aaa5..fae147452aff 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -68,7 +68,7 @@ enum spi_nor_read_command_index { SNOR_CMD_READ_4_4_4, SNOR_CMD_READ_1_4_4_DTR, - /* Octo SPI */ + /* Octal SPI */ SNOR_CMD_READ_1_1_8, SNOR_CMD_READ_1_8_8, SNOR_CMD_READ_8_8_8, @@ -85,7 +85,7 @@ enum spi_nor_pp_command_index { SNOR_CMD_PP_1_4_4, SNOR_CMD_PP_4_4_4, - /* Octo SPI */ + /* Octal SPI */ SNOR_CMD_PP_1_1_8, SNOR_CMD_PP_1_8_8, SNOR_CMD_PP_8_8_8, @@ -278,6 +278,7 @@ struct flash_info { #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ #define USE_CLSR BIT(14) /* use CLSR command */ +#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */ /* Part specific fixup hooks. 
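
The SPI_NOR_OCTAL_READ flag defined just above (BIT(15) in flash_info->flags) is what lets a table entry such as mt35xu512aba advertise 1-1-8 reads; the spi_nor_init_params() hunk further below maps it to SNOR_HWCAPS_READ_1_1_8 and a 1-1-8 read command. A stand-alone sketch of that mapping, assuming placeholder values for the hwcaps bit, opcode and protocol id (only the BIT(15) flag value comes from the hunk):

#include <stdint.h>
#include <stdio.h>

#define SPI_NOR_OCTAL_READ       (1u << 15)  /* flash_info flag, from the hunk above */
#define SNOR_HWCAPS_READ_1_1_8   (1u << 7)   /* placeholder bit position */
#define SPINOR_OP_READ_1_1_8     0x8b        /* placeholder opcode value */
#define SNOR_PROTO_1_1_8         0x118       /* placeholder protocol id */

struct read_settings {
        uint8_t  num_mode_clocks;
        uint8_t  num_wait_states;
        uint8_t  opcode;
        uint32_t proto;
};

/* Sketch of the SPI_NOR_OCTAL_READ handling added to spi_nor_init_params(). */
static uint32_t init_octal_read(uint32_t info_flags, struct read_settings *rd)
{
        uint32_t hwcaps = 0;

        if (info_flags & SPI_NOR_OCTAL_READ) {
                hwcaps |= SNOR_HWCAPS_READ_1_1_8;
                /* same values as the spi_nor_set_read_settings() call in the hunk */
                rd->num_mode_clocks = 0;
                rd->num_wait_states = 8;
                rd->opcode = SPINOR_OP_READ_1_1_8;
                rd->proto  = SNOR_PROTO_1_1_8;
        }
        return hwcaps;
}

int main(void)
{
        struct read_settings rd = { 0 };
        uint32_t hwcaps = init_octal_read(SPI_NOR_OCTAL_READ, &rd);

        printf("hwcaps 0x%08x, opcode 0x%02x, dummy cycles %u\n",
               (unsigned int)hwcaps, (unsigned int)rd.opcode,
               (unsigned int)rd.num_wait_states);
        return 0;
}
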
*/ const struct spi_nor_fixups *fixups; @@ -398,6 +399,8 @@ static u8 spi_nor_convert_3to4_read(u8 opcode) { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, + { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, + { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, @@ -414,6 +417,8 @@ static u8 spi_nor_convert_3to4_program(u8 opcode) { SPINOR_OP_PP, SPINOR_OP_PP_4B }, { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, + { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, + { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, }; return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, @@ -1740,7 +1745,11 @@ static const struct flash_info spi_nor_ids[] = { { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ) }, { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) }, + { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ) }, { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, @@ -1836,6 +1845,8 @@ static const struct flash_info spi_nor_ids[] = { { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) }, { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) }, { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, @@ -1847,6 +1858,8 @@ static const struct flash_info spi_nor_ids[] = { SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) .fixups = &mx25l25635_fixups }, { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, @@ -1872,7 +1885,8 @@ static const struct flash_info spi_nor_ids[] = { /* Micron */ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512, - SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES) + SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | + SPI_NOR_4B_OPCODES) }, /* PMC */ @@ -1885,13 +1899,17 @@ static const struct flash_info spi_nor_ids[] = { */ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) }, { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ 
| USE_CLSR) }, - { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, - { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, @@ -3591,6 +3609,13 @@ static int spi_nor_init_params(struct spi_nor *nor, SNOR_PROTO_1_1_4); } + if (info->flags & SPI_NOR_OCTAL_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8], + 0, 8, SPINOR_OP_READ_1_1_8, + SNOR_PROTO_1_1_8); + } + /* Page Program settings. */ params->hwcaps.mask |= SNOR_HWCAPS_PP; spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 485462d3087f..537c90c8eb0a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } - /* Link-local multicast packets should be passed to the - * stack on the link they arrive as well as pass them to the - * bond-master device. These packets are mostly usable when - * stack receives it with the link on which they arrive - * (e.g. LLDP) they also must be available on master. Some of - * the use cases include (but are not limited to): LLDP agents - * that must be able to operate both on enslaved interfaces as - * well as on bonds themselves; linux bridges that must be able - * to process/pass BPDUs from attached bonds when any kind of - * STP version is enabled on the network. + /* + * For packets determined by bond_should_deliver_exact_match() call to + * be suppressed we want to make an exception for link-local packets. + * This is necessary for e.g. LLDP daemons to be able to monitor + * inactive slave links without being forced to bind to them + * explicitly. + * + * At the same time, packets that are passed to the bonding master + * (including link-local ones) can have their originating interface + * determined via PACKET_ORIGDEV socket option. 
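
The comment above summarises the new decision flow in bond_handle_frame(): frames that bond_should_deliver_exact_match() would suppress are still passed up the slave when they are link-local (e.g. LLDP), rather than being cloned to the bond unconditionally as in the deleted block that follows. A minimal sketch of that decision, with the two kernel predicates reduced to booleans; the enum names mirror the kernel's rx_handler results and RX_HANDLER_ANOTHER stands for the normal path that re-targets the frame at the bond master.

#include <stdbool.h>
#include <stdio.h>

enum rx_result { RX_HANDLER_PASS, RX_HANDLER_EXACT, RX_HANDLER_ANOTHER };

/*
 * deliver_exact_match stands in for bond_should_deliver_exact_match(),
 * link_local for is_link_local_ether_addr(eth_hdr(skb)->h_dest).
 */
static enum rx_result handle_frame(bool deliver_exact_match, bool link_local)
{
        if (deliver_exact_match) {
                /* suppressed slave: still let link-local frames (e.g. LLDP) through */
                if (link_local)
                        return RX_HANDLER_PASS;
                return RX_HANDLER_EXACT;
        }
        /* otherwise the frame is handed to the bond master (skb->dev = bond->dev) */
        return RX_HANDLER_ANOTHER;
}

int main(void)
{
        printf("inactive slave, LLDP  -> %d (PASS)\n",  handle_frame(true, true));
        printf("inactive slave, other -> %d (EXACT)\n", handle_frame(true, false));
        printf("active slave          -> %d (ANOTHER)\n", handle_frame(false, false));
        return 0;
}
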
*/ - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); - - if (nskb) { - nskb->dev = bond->dev; - nskb->queue_mapping = 0; - netif_rx(nskb); - } - return RX_HANDLER_PASS; - } - if (bond_should_deliver_exact_match(skb, slave, bond)) + if (bond_should_deliver_exact_match(skb, slave, bond)) { + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + return RX_HANDLER_PASS; return RX_HANDLER_EXACT; + } skb->dev = bond->dev; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..c76892ac4e69 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); } -static void b53_enable_vlan(struct b53_device *dev, bool enable) +static void b53_enable_vlan(struct b53_device *dev, bool enable, + bool enable_filtering) { u8 mgmt, vc0, vc1, vc4 = 0, vc5; @@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; - vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; - vc5 |= VC5_DROP_VTABLE_MISS; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { + vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } if (is5325(dev)) vc0 &= ~VC0_RESERVED_1; @@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) } b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + + dev->vlan_enabled = enable; + dev->vlan_filtering_enabled = enable_filtering; } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static u16 b53_default_pvid(struct b53_device *dev) +{ + if (is5325(dev) || is5365(dev)) + return 1; + else + return 0; +} + int b53_configure_vlan(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; struct b53_vlan vl = { 0 }; - int i; + int i, def_vid; + + def_vid = b53_default_pvid(dev); /* clear all vlan entries */ if (is5325(dev) || is5365(dev)) { - for (i = 1; i < dev->num_vlans; i++) + for (i = def_vid; i < dev->num_vlans; i++) b53_set_vlan_entry(dev, i, &vl); } else { b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false); + b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, - B53_VLAN_PORT_DEF_TAG(i), 1); + B53_VLAN_PORT_DEF_TAG(i), def_vid); if (!is5325(dev) && !is5365(dev)) b53_set_jumbo(dev, dev->enable_jumbo, false); @@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { + struct b53_device *dev = ds->priv; + struct net_device *bridge_dev; + unsigned int i; + u16 pvid, new_pvid; + + /* Handle the case were multiple bridges span the same switch device + * and one of them has a different setting than what is being requested + * which would be breaking filtering semantics for any of the other + * bridge devices. 
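
The b53_vlan_filtering() comment above explains that VLAN filtering is a global property of these switches, so a request from one bridge must be rejected when another bridge on the same switch is configured differently; the loop that follows implements that check. A stand-alone sketch of the same check, with the port-to-bridge and per-bridge filtering lookups reduced to plain arrays (stand-ins for dsa_to_port() and br_vlan_enabled()):

#include <stdbool.h>
#include <stdio.h>

#define NUM_PORTS 5

/* bridge id per switch port (-1 = not bridged); stand-in for dsa_to_port()->bridge_dev */
static int  bridge_of[NUM_PORTS] = { 0, 0, 1, 1, -1 };
/* current vlan_filtering setting per bridge; stand-in for br_vlan_enabled() */
static bool filtering_of[2]      = { true, false };

static int vlan_filtering(int port, bool want_filtering)
{
        int own_bridge = bridge_of[port];

        for (int i = 0; i < NUM_PORTS; i++) {
                int other = bridge_of[i];

                /* another bridge on the same switch with a different setting: reject */
                if (other >= 0 && other != own_bridge &&
                    filtering_of[other] != want_filtering)
                        return -1;              /* -EINVAL in the driver */
        }

        /* the driver then adjusts the port's PVID and re-programs the VLAN
         * controls; here we only record the new global setting */
        if (own_bridge >= 0)
                filtering_of[own_bridge] = want_filtering;
        return 0;
}

int main(void)
{
        /* bridge 1 runs without filtering, so enabling it for bridge 0 must fail */
        printf("enable filtering on port 0: %d\n", vlan_filtering(0, true));
        return 0;
}
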
+ */ + b53_for_each_port(dev, i) { + bridge_dev = dsa_to_port(ds, i)->bridge_dev; + if (bridge_dev && + bridge_dev != dsa_to_port(ds, port)->bridge_dev && + br_vlan_enabled(bridge_dev) != vlan_filtering) { + netdev_err(bridge_dev, + "VLAN filtering is global to the switch!\n"); + return -EINVAL; + } + } + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + new_pvid = pvid; + if (dev->vlan_filtering_enabled && !vlan_filtering) { + /* Filtering is currently enabled, use the default PVID since + * the bridge does not expect tagging anymore + */ + dev->ports[port].pvid = pvid; + new_pvid = b53_default_pvid(dev); + } else if (!dev->vlan_filtering_enabled && vlan_filtering) { + /* Filtering is currently disabled, restore the previous PVID */ + new_pvid = dev->ports[port].pvid; + } + + if (pvid != new_pvid) + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), + new_pvid); + + b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); + return 0; } EXPORT_SYMBOL(b53_vlan_filtering); @@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid_end > dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true); + b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); return 0; } @@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_fast_age_vlan(dev, vid); } - if (pvid) { + if (pvid && !dsa_is_cpu_port(ds, port)) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), vlan->vid_end); b53_fast_age_vlan(dev, vid); @@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, vl->members &= ~BIT(port); - if (pvid == vid) { - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; - } + if (pvid == vid) + pvid = b53_default_pvid(dev); if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag &= ~(BIT(port)); @@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; + pvid = b53_default_pvid(dev); /* Make this port join all VLANs without VLAN entries */ if (is58xx(dev)) { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..4dc7ee38b258 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -91,6 +91,7 @@ enum { struct b53_port { u16 vlan_ctl_mask; struct ethtool_eee eee; + u16 pvid; }; struct b53_vlan { @@ -137,6 +138,8 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; + bool vlan_enabled; + bool vlan_filtering_enabled; unsigned int num_ports; struct b53_port *ports; }; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 17ec32b0a1cc..14138d423cf1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, { struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; /* Get the parent device WoL settings */ - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); /* Advertise the parent device supported settings */ wol->supported = pwol.supported; @@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = 
bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); if (wol->wolopts & ~pwol.supported) return -EINVAL; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 693a67f45bef..ddc1f9ca8ebc 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1162,6 +1162,12 @@ static struct platform_driver gswip_driver = { module_platform_driver(gswip_driver); +MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 12fd7ce3f1ff..7e3c00bd9532 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -896,7 +896,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, default: return U64_MAX; } - value = (((u64)high) << 16) | low; + value = (((u64)high) << 32) | low; return value; } @@ -3093,7 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, - .stats_snapshot = mv88e6320_g1_stats_snapshot, + .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -4595,6 +4595,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, return 0; } +static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip) +{ + int i; + + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) + chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID; +} + static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, int port) { @@ -4631,6 +4639,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, if (err) goto free; + mv88e6xxx_ports_cmode_init(chip); + mutex_lock(&chip->reg_lock); err = mv88e6xxx_switch_reset(chip); mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index ebd26b6a93e6..79ab51e69aee 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -398,6 +398,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } + /* cmode doesn't change, nothing to do for us */ + if (cmode == chip->ports[port].cmode) + return 0; + lane = mv88e6390x_serdes_get_lane(chip, port); if (lane < 0) return lane; @@ -408,7 +412,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return err; } - err = mv88e6390_serdes_power(chip, port, false); + err = mv88e6390x_serdes_power(chip, port, false); if (err) return err; @@ -424,7 +428,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - err = mv88e6390_serdes_power(chip, port, true); + err = mv88e6390x_serdes_power(chip, port, true); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/port.h 
b/drivers/net/dsa/mv88e6xxx/port.h index e583641de758..4aadf321edb7 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -52,6 +52,7 @@ #define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 #define MV88E6185_PORT_STS_CMODE_PHY 0x0006 #define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 +#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff /* Offset 0x01: MAC (or PCS or Physical) Control Register */ #define MV88E6XXX_PORT_MAC_CTL 0x01 diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index b58ca7cb8e9d..fbba300c1d01 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -275,6 +275,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) { + /* Tx TC/Queue number config */ + hw_atl_rpb_tps_tx_tc_mode_set(self, 1U); + hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 939f77e2e117..8ac7a67b15c1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1274,6 +1274,15 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en); } +void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw, + u32 tx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR, + HW_ATL_TPB_TX_TC_MODE_MSK, + HW_ATL_TPB_TX_TC_MODE_SHIFT, + tx_traf_class_mode); +} + void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, u32 tx_buff_hi_threshold_per_tc, u32 buffer) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 03c570d115fe..f529540bfd7e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -605,6 +605,10 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, /* tpb */ +/* set TX Traffic Class Mode */ +void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw, + u32 tx_traf_class_mode); + /* set tx buffer enable */ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 8470d92db812..e91ffce005f1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1948,6 +1948,19 @@ /* default value of bitfield tx_buf_en */ #define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0 +/* register address for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900 +/* bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100 +/* inverted bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF +/* lower bit position of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8 +/* width of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1 +/* default value of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0 + /* tx 
tx{b}_hi_thresh[c:0] bitfield definitions * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]". * parameter: buffer {b} | stride size 0x10 | range [0, 7] diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index bb41becb6609..31ff1e0d1baa 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl2_adapter *adapter; - static int cards_found; + static int cards_found = 0; unsigned long mmio_start; int mmio_len; int err; - cards_found = 0; - err = pci_enable_device(pdev); if (err) return err; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 28c9b0bdf2f6..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); reg = rxchk_readl(priv, RXCHK_CONTROL); + /* Clear L2 header checks, which would prevent BPDUs + * from being received. + */ + reg &= ~RXCHK_L2_HDR_DIS; if (priv->rx_chk_en) reg |= RXCHK_EN; else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8bc7e495b027..803f7990d32b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -500,6 +500,12 @@ normal_tx: } length >>= 9; + if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { + dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", + skb->len); + i = 0; + goto tx_dma_error; + } flags |= bnxt_lhint_arr[length]; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); @@ -3903,7 +3909,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (len) break; /* on first few passes, just barely sleep */ - if (i < DFLT_HWRM_CMD_TIMEOUT) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); else @@ -3926,7 +3932,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, dma_rmb(); if (*valid) break; - udelay(1); + usleep_range(1, 5); } if (j >= HWRM_VALID_BIT_DELAY_USEC) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a451796deefe..2fb653e0048d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -582,7 +582,7 @@ struct nqe_cn { (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) -#define HWRM_VALID_BIT_DELAY_USEC 20 +#define HWRM_VALID_BIT_DELAY_USEC 150 #define BNXT_HWRM_CHNL_CHIMP 0 #define BNXT_HWRM_CHNL_KONG 1 diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index f4d81765221e..62636c1ed141 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -271,7 +271,7 @@ struct xcast_addr_list { }; struct nicvf_work { - struct delayed_work work; + struct work_struct work; u8 mode; struct xcast_addr_list *mc; }; @@ -327,7 +327,11 @@ struct nicvf { struct nicvf_work rx_mode_work; /* spinlock to protect workqueue arguments from concurrent access */ spinlock_t rx_mode_wq_lock; - + /* workqueue for handling kernel ndo_set_rx_mode() calls */ + struct workqueue_struct *nicvf_rx_mode_wq; + /* mutex to 
protect VF's mailbox contents from concurrent access */ + struct mutex rx_mode_mtx; + struct delayed_work link_change_work; /* PTP timestamp */ struct cavium_ptp *ptp_clock; /* Inbound timestamping is on */ @@ -575,10 +579,8 @@ struct set_ptp { struct xcast { u8 msg; - union { - u8 mode; - u64 mac; - } data; + u8 mode; + u64 mac:48; }; /* 128 bit shared memory between PF and each VF */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 6c8dcb65ff03..c90252829ed3 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -57,14 +57,8 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 *vf_lmac_map; - struct delayed_work dwork; - struct workqueue_struct *check_link; - u8 *link; - u8 *duplex; - u32 *speed; u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; - bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; /* MSI-X */ u8 num_vec; @@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp) nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); } +/* Get BGX LMAC link status and update corresponding VF + * if there is a change, valid only if internal L2 switch + * is not present otherwise VF link is always treated as up + */ +static void nic_link_status_get(struct nicpf *nic, u8 vf) +{ + union nic_mbx mbx = {}; + struct bgx_link_status link; + u8 bgx, lmac; + + mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + + /* Get BGX, LMAC indices for the VF */ + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + /* Get interface link status */ + bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); + + /* Send a mbox message to VF with current link status */ + mbx.link_status.link_up = link.link_up; + mbx.link_status.duplex = link.duplex; + mbx.link_status.speed = link.speed; + mbx.link_status.mac_type = link.mac_type; + + /* reply with link status */ + nic_send_msg_to_vf(nic, vf, &mbx); +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) int i; int ret = 0; - nic->mbx_lock[vf] = true; - mbx_addr = nic_get_mbx_addr(vf); mbx_data = (u64 *)&mbx; @@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic_mbx_send_ready(nic, vf); - if (vf < nic->num_vf_en) { - nic->link[vf] = 0; - nic->duplex[vf] = 0; - nic->speed[vf] = 0; - } - goto unlock; + return; case NIC_MBOX_MSG_QS_CFG: reg_addr = NIC_PF_QSET_0_127_CFG | (mbx.qs.num << NIC_QS_ID_SHIFT); @@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_RSS_SIZE: nic_send_rss_size(nic, vf); - goto unlock; + return; case NIC_MBOX_MSG_RSS_CFG: case NIC_MBOX_MSG_RSS_CFG_CONT: nic_config_rss(nic, &mbx.rss_cfg); @@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ nic_enable_vf(nic, vf, true); - goto unlock; + break; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ if (vf >= nic->num_vf_en) @@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, 
&mbx.sqs_alloc); - goto unlock; + return; case NIC_MBOX_MSG_NICVF_PTR: nic->nicvf[vf] = mbx.nicvf.nicvf; break; case NIC_MBOX_MSG_PNICVF_PTR: nic_send_pnicvf(nic, vf); - goto unlock; + return; case NIC_MBOX_MSG_SNICVF_PTR: nic_send_snicvf(nic, &mbx.nicvf); - goto unlock; + return; case NIC_MBOX_MSG_BGX_STATS: nic_get_bgx_stats(nic, &mbx.bgx_stats); - goto unlock; + return; case NIC_MBOX_MSG_LOOPBACK: ret = nic_config_loopback(nic, &mbx.lbk); break; @@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_PFC: nic_pause_frame(nic, vf, &mbx.pfc); - goto unlock; + return; case NIC_MBOX_MSG_PTP_CFG: nic_config_timestamp(nic, vf, &mbx.ptp); break; @@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); bgx_set_dmac_cam_filter(nic->node, bgx, lmac, - mbx.xcast.data.mac, + mbx.xcast.mac, vf < NIC_VF_PER_MBX_REG ? vf : vf - NIC_VF_PER_MBX_REG); break; @@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) } bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); + bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode); break; + case NIC_MBOX_MSG_BGX_LINK_CHANGE: + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ + break; + } + nic_link_status_get(nic, vf); + return; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); @@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) mbx.msg.msg, vf); nic_mbx_send_nack(nic, vf); } -unlock: - nic->mbx_lock[vf] = false; } static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) @@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) return 0; } -/* Poll for BGX LMAC link status and update corresponding VF - * if there is a change, valid only if internal L2 switch - * is not present otherwise VF link is always treated as up - */ -static void nic_poll_for_link(struct work_struct *work) -{ - union nic_mbx mbx = {}; - struct nicpf *nic; - struct bgx_link_status link; - u8 vf, bgx, lmac; - - nic = container_of(work, struct nicpf, dwork.work); - - mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - - for (vf = 0; vf < nic->num_vf_en; vf++) { - /* Poll only if VF is UP */ - if (!nic->vf_enabled[vf]) - continue; - - /* Get BGX, LMAC indices for the VF */ - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - /* Get interface link status */ - bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); - - /* Inform VF only if link status changed */ - if (nic->link[vf] == link.link_up) - continue; - - if (!nic->mbx_lock[vf]) { - nic->link[vf] = link.link_up; - nic->duplex[vf] = link.duplex; - nic->speed[vf] = link.speed; - - /* Send a mbox message to VF with current link status */ - mbx.link_status.link_up = link.link_up; - mbx.link_status.duplex = link.duplex; - mbx.link_status.speed = link.speed; - mbx.link_status.mac_type = link.mac_type; - nic_send_msg_to_vf(nic, vf, &mbx); - } - } - queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); -} - static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; @@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if 
(!nic->vf_lmac_map) goto err_release_regions; - nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->link) - goto err_release_regions; - - nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->duplex) - goto err_release_regions; - - nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL); - if (!nic->speed) - goto err_release_regions; - /* Initialize hardware */ nic_init_hw(nic); @@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_unregister_interrupts; - /* Register a physical link status poll fn() */ - nic->check_link = alloc_workqueue("check_link_status", - WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (!nic->check_link) { - err = -ENOMEM; - goto err_disable_sriov; - } - - INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); - queue_delayed_work(nic->check_link, &nic->dwork, 0); - return 0; -err_disable_sriov: - if (nic->flags & NIC_SRIOV_ENABLED) - pci_disable_sriov(pdev); err_unregister_interrupts: nic_unregister_interrupts(nic); err_release_regions: @@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev) if (nic->flags & NIC_SRIOV_ENABLED) pci_disable_sriov(pdev); - if (nic->check_link) { - /* Destroy work Queue */ - cancel_delayed_work_sync(&nic->dwork); - destroy_workqueue(nic->check_link); - } - nic_unregister_interrupts(nic); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 88f8a8fa93cd..503cfadff4ac 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -/* workqueue for handling kernel ndo_set_rx_mode() calls */ -static struct workqueue_struct *nicvf_rx_mode_wq; - static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) { if (nic->sqs_mode) @@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) { int timeout = NIC_MBOX_MSG_TIMEOUT; int sleep = 10; + int ret = 0; + + mutex_lock(&nic->rx_mode_mtx); nic->pf_acked = false; nic->pf_nacked = false; @@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) netdev_err(nic->netdev, "PF NACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); - return -EINVAL; + ret = -EINVAL; + break; } msleep(sleep); if (nic->pf_acked) @@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) netdev_err(nic->netdev, "PF didn't ACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); - return -EBUSY; + ret = -EBUSY; + break; } } - return 0; + mutex_unlock(&nic->rx_mode_mtx); + return ret; } /* Checks if VF is able to comminicate with PF @@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic) return 1; } +static void nicvf_send_cfg_done(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + if (nicvf_send_msg_to_pf(nic, &mbx)) { + netdev_err(nic->netdev, + "PF didn't respond to CFG DONE msg\n"); + } +} + static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) { if (bgx->rx) @@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) break; case NIC_MBOX_MSG_BGX_LINK_CHANGE: nic->pf_acked = true; - nic->link_up = mbx.link_status.link_up; - nic->duplex = mbx.link_status.duplex; - nic->speed = mbx.link_status.speed; 
- nic->mac_type = mbx.link_status.mac_type; - if (nic->link_up) { - netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", - nic->speed, - nic->duplex == DUPLEX_FULL ? - "Full" : "Half"); - netif_carrier_on(nic->netdev); - netif_tx_start_all_queues(nic->netdev); - } else { - netdev_info(nic->netdev, "Link is Down\n"); - netif_carrier_off(nic->netdev); - netif_tx_stop_all_queues(nic->netdev); + if (nic->link_up != mbx.link_status.link_up) { + nic->link_up = mbx.link_status.link_up; + nic->duplex = mbx.link_status.duplex; + nic->speed = mbx.link_status.speed; + nic->mac_type = mbx.link_status.mac_type; + if (nic->link_up) { + netdev_info(nic->netdev, + "Link is Up %d Mbps %s duplex\n", + nic->speed, + nic->duplex == DUPLEX_FULL ? + "Full" : "Half"); + netif_carrier_on(nic->netdev); + netif_tx_start_all_queues(nic->netdev); + } else { + netdev_info(nic->netdev, "Link is Down\n"); + netif_carrier_off(nic->netdev); + netif_tx_stop_all_queues(nic->netdev); + } } break; case NIC_MBOX_MSG_ALLOC_SQS: @@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; union nic_mbx mbx = {}; + cancel_delayed_work_sync(&nic->link_change_work); + + /* wait till all queued set_rx_mode tasks completes */ + drain_workqueue(nic->nicvf_rx_mode_wq); + mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; nicvf_send_msg_to_pf(nic, &mbx); @@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) return nicvf_send_msg_to_pf(nic, &mbx); } +static void nicvf_link_status_check_task(struct work_struct *work_arg) +{ + struct nicvf *nic = container_of(work_arg, + struct nicvf, + link_change_work.work); + union nic_mbx mbx = {}; + mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + nicvf_send_msg_to_pf(nic, &mbx); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 2 * HZ); +} + int nicvf_open(struct net_device *netdev) { int cpu, err, qidx; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - union nic_mbx mbx = {}; + + /* wait till all queued set_rx_mode tasks completes if any */ + drain_workqueue(nic->nicvf_rx_mode_wq); netif_carrier_off(netdev); @@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); /* Send VF config done msg to PF */ - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; - nicvf_write_to_mbx(nic, &mbx); + nicvf_send_cfg_done(nic); + + INIT_DELAYED_WORK(&nic->link_change_work, + nicvf_link_status_check_task); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 0); return 0; cleanup: @@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* flush DMAC filters and reset RX mode */ mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; - nicvf_send_msg_to_pf(nic, &mbx); + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; if (mode & BGX_XCAST_MCAST_FILTER) { /* once enabling filtering, we need to signal to PF to add * its' own LMAC to the filter to accept packets for it. 
*/ mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; - mbx.xcast.data.mac = 0; - nicvf_send_msg_to_pf(nic, &mbx); + mbx.xcast.mac = 0; + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } /* check if we have any specific MACs to be added to PF DMAC filter */ @@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* now go through kernel list of MACs and add them one by one */ for (idx = 0; idx < mc_addrs->count; idx++) { mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; - mbx.xcast.data.mac = mc_addrs->mc[idx]; - nicvf_send_msg_to_pf(nic, &mbx); + mbx.xcast.mac = mc_addrs->mc[idx]; + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } - kfree(mc_addrs); } /* and finally set rx mode for PF accordingly */ mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; - mbx.xcast.data.mode = mode; + mbx.xcast.mode = mode; nicvf_send_msg_to_pf(nic, &mbx); +free_mc: + kfree(mc_addrs); } static void nicvf_set_rx_mode_task(struct work_struct *work_arg) { struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, - work.work); + work); struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); u8 mode; struct xcast_addr_list *mc; @@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev) kfree(nic->rx_mode_work.mc); nic->rx_mode_work.mc = mc_list; nic->rx_mode_work.mode = mode; - queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); + queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work); spin_unlock(&nic->rx_mode_wq_lock); } @@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->reset_task, nicvf_reset_task); - INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); + nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", + WQ_MEM_RECLAIM, + nic->vf_id); + INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); spin_lock_init(&nic->rx_mode_wq_lock); + mutex_init(&nic->rx_mode_mtx); err = register_netdev(netdev); if (err) { @@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev) nic = netdev_priv(netdev); pnetdev = nic->pnicvf->netdev; - cancel_delayed_work_sync(&nic->rx_mode_work.work); - /* Check if this Qset is assigned to different VF. * If yes, clean primary and all secondary Qsets. 
*/ if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) unregister_netdev(pnetdev); + if (nic->nicvf_rx_mode_wq) { + destroy_workqueue(nic->nicvf_rx_mode_wq); + nic->nicvf_rx_mode_wq = NULL; + } nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); if (nic->drv_stats) @@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = { static int __init nicvf_init_module(void) { pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); - nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic", - WQ_MEM_RECLAIM); return pci_register_driver(&nicvf_driver); } static void __exit nicvf_cleanup_module(void) { - if (nicvf_rx_mode_wq) { - destroy_workqueue(nicvf_rx_mode_wq); - nicvf_rx_mode_wq = NULL; - } pci_unregister_driver(&nicvf_driver); } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index e337da6ba2a4..673c57b8023f 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx) /* Disable MAC steering (NCSI traffic) */ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) - bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); + bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00); } static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index cbdd20b9ee6f..5cbc54e9eb19 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -60,7 +60,7 @@ #define RX_DMACX_CAM_EN BIT_ULL(48) #define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) #define RX_DMAC_COUNT 32 -#define BGX_CMR_RX_STREERING 0x300 +#define BGX_CMR_RX_STEERING 0x300 #define RX_TRAFFIC_STEER_RULE_COUNT 8 #define BGX_CMR_CHAN_MSK_AND 0x450 #define BGX_CMR_BIST_STATUS 0x460 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index c041f44324db..b3654598a2d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; lld->udb_density = 1 << adap->params.sge.eq_qpp; lld->ucq_density = 1 << adap->params.sge.iq_qpp; + lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10); lld->filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 5fa9a2d5fc4b..21da34a4ca24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -336,6 +336,7 @@ struct cxgb4_lld_info { unsigned int cclk_ps; /* Core clock period in psec */ unsigned short udb_density; /* # of user DB/page */ unsigned short ucq_density; /* # of user CQs/page */ + unsigned int sge_host_page_size; /* SGE host page size */ unsigned short filt_mode; /* filter optional components */ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ /* scheduler queue */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index b8155f5e71b4..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ 
-3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f52e2c46e6a7..e4ff531db14a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); if (!ok) { + /* Log this in case the user has forgotten to give the kernel + * any buffers, even later in the application. + */ dev_info(&vsi->back->pdev->dev, - "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", + "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", ring->xsk_umem ? "UMEM enabled " : "", ring->queue_index, pf_q); } @@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); - if (i40e_enabled_xdp_vsi(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + /* Make sure that in-progress ndo_xdp_xmit + * calls are completed. + */ + synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[i]); + } i40e_clean_rx_ring(vsi->rx_rings[i]); } @@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, if (old_prog) bpf_prog_put(old_prog); + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->xdp_rings[i]->xsk_umem) + (void)i40e_xsk_async_xmit(vsi->netdev, i); + return 0; } @@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) { i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); - if (i40e_enabled_xdp_vsi(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + /* Make sure that in-progress ndo_xdp_xmit calls are + * completed. 
+ */ + synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); + } i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a7e14e98889f..6c97667d20ef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_ring *xdp_ring; int drops = 0; int i; @@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; - if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return -ENXIO; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 870cf654e436..3827f16e6923 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, err = i40e_queue_pair_enable(vsi, qid); if (err) return err; + + /* Kick start the NAPI context so that receiving will start */ + err = i40e_xsk_async_xmit(vsi->netdev, qid); + if (err) + return err; } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index daff8183534b..cb35d8202572 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) else mrqc = IXGBE_MRQC_VMDQRSS64EN; - /* Enable L3/L4 for Tx Switched packets */ - mrqc |= IXGBE_MRQC_L3L4TXSWEN; + /* Enable L3/L4 for Tx Switched packets only for X550, + * older devices do not support this feature + */ + if (hw->mac.type >= ixgbe_mac_X550) + mrqc |= IXGBE_MRQC_L3L4TXSWEN; } else { if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; @@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct ixgbe_adapter *adapter = netdev_priv(dev); struct bpf_prog *old_prog; + bool need_reset; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) return -EINVAL; @@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return -ENOMEM; old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); /* If transitioning XDP modes reconfigure rings */ - if (!!prog != !!old_prog) { + if (need_reset) { int err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) { @@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) if (old_prog) bpf_prog_put(old_prog); + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. 
+ */ + if (need_reset && prog) + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->xdp_ring[i]->xsk_umem) + (void)ixgbe_xsk_async_xmit(adapter->netdev, i); + return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 65c3e2c979d4..36a8879536a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, ixgbe_txrx_ring_disable(adapter, qid); err = ixgbe_add_xsk_umem(adapter, umem, qid); + if (err) + return err; - if (if_running) + if (if_running) { ixgbe_txrx_ring_enable(adapter, qid); - return err; + /* Kick start the NAPI context so that receiving will start */ + err = ixgbe_xsk_async_xmit(adapter->netdev, qid); + if (err) + return err; + } + + return 0; } static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) @@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) dma_addr_t dma; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring))) { + if (unlikely(!ixgbe_desc_unused(xdp_ring)) || + !netif_carrier_ok(xdp_ring->netdev)) { work_done = false; break; } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) - return ret; + goto err_put_clk; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? @@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) infer_hw_params(msp); return 0; + +err_put_clk: + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d4568eb2297..8433fb9c3eee 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2146,7 +2146,7 @@ err_drop_frame: if (unlikely(!skb)) goto err_drop_frame_ret_pool; - dma_sync_single_range_for_cpu(dev->dev.parent, + dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, rx_desc->buf_phys_addr, MVNETA_MH_SIZE + NET_SKB_PAD, rx_bytes, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..57727fe1501e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 200; + pdev->d3_delay = 300; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", 
priv->port, dev->dev_addr); err = -EINVAL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 32519c93df17..b65e274b02e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + u16 thres_cells; + u16 delay_cells; bool lossy; - u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, continue; lossy = !(pfc || pause_en); - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, - pause_en); - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, + pfc, pause_en); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, + thres_cells, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index f6ecfa778660..8f72587b5a2c 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1681,5 +1681,5 @@ MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>"); MODULE_LICENSE("GPL"); module_param_named(debug, debug.msg_enable, int, 0); -MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)"); +MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)"); MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 310807ef328b..4d1b4a24907f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1400,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx, } static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, - unsigned int frame_length) + unsigned int frame_length, + int nr_frags) { /* called only from within lan743x_tx_xmit_frame. * assuming tx->ring_lock has already been acquired. 
@@ -1410,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, /* wrap up previous descriptor */ tx->frame_data0 |= TX_DESC_DATA0_EXT_; + if (nr_frags <= 0) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = tx->frame_data0; @@ -1514,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, u32 tx_tail_flags = 0; /* wrap up previous descriptor */ - tx->frame_data0 |= TX_DESC_DATA0_LS_; - tx->frame_data0 |= TX_DESC_DATA0_IOC_; + if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) == + TX_DESC_DATA0_DTYPE_DATA_) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; buffer_info = &tx->buffer_info[tx->frame_tail]; @@ -1600,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, } if (gso) - lan743x_tx_frame_add_lso(tx, frame_length); + lan743x_tx_frame_add_lso(tx, frame_length, nr_frags); if (nr_frags <= 0) goto finish; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e23ca90289f7..0a868c829b90 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum alu_op alu_op, bool skip) + enum alu_op alu_op) { const struct bpf_insn *insn = &meta->insn; - if (skip) { - meta->skip = true; - return 0; - } - wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); @@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); } static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); } static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); } static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); } static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); } static int 
mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); @@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; - u32 mpa_buff_size; + u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; @@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ @@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; } + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, - QED_IWARP_MAX_SYN_PKT_SIZE, + buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; @@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, - mpa_buff_size, + buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; @@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) -#define QED_IWARP_MAX_SYN_PKT_SIZE (128) #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) #define QED_IWARP_MAX_OOO (16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fc..736e29635b77 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) static int dwmac4_rx_check_timestamp(void *desc) { struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); u32 own, ctxt; int ret = 1; - own = p->des3 & RDES3_OWN; - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + own = rdes3 
& RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); if (likely(!own && ctxt)) { - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) /* Corrupted value */ ret = -EINVAL; else diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5d85742a2be0..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct ethtool_eee *edata) { struct stmmac_priv *priv = netdev_priv(dev); + int ret; - priv->eee_enabled = edata->eee_enabled; - - if (!priv->eee_enabled) + if (!edata->eee_enabled) { stmmac_disable_eee_mode(priv); - else { + } else { /* We are asking for enabling the EEE but it is safe * to verify all by invoking the eee_init function. * In case of failure it will return an error. */ - priv->eee_enabled = stmmac_eee_init(priv); - if (!priv->eee_enabled) + edata->eee_enabled = stmmac_eee_init(priv); + if (!edata->eee_enabled) return -EOPNOTSUPP; - - /* Do not change tx_lpi_timer in case of failure */ - priv->tx_lpi_timer = edata->tx_lpi_timer; } - return phy_ethtool_set_eee(dev->phydev, edata); + ret = phy_ethtool_set_eee(dev->phydev, edata); + if (ret) + return ret; + + priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, const char *name; char node_name[32]; - if (of_property_read_string(node, "label", &name) < 0) { + if (of_property_read_string(child, "label", &name) < 0) { snprintf(node_name, sizeof(node_name), "%pOFn", child); name = node_name; } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 3377ac66a347..5583d993480d 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -692,15 +692,20 @@ out: static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6); bool metadata = geneve->collect_md; + bool ipv4, ipv6; int ret = 0; + ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata; + ipv4 = !ipv6 || metadata; #if IS_ENABLED(CONFIG_IPV6) - if (ipv6 || metadata) + if (ipv6) { ret = geneve_sock_add(geneve, true); + if (ret < 0 && ret != -EAFNOSUPPORT) + ipv4 = false; + } #endif - if (!ret && (!ipv6 || metadata)) + if (ipv4) ret = geneve_sock_add(geneve, false); if (ret < 0) geneve_sock_release(geneve); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 256adbd044f5..cf4897043e83 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net, schedule_delayed_work(&ndev_ctx->dwork, 0); } +static void netvsc_comp_ipcsum(struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + + iph->check = 0; + iph->check = ip_fast_csum(iph, iph->ihl); +} + static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, struct netvsc_channel *nvchan) { @@ -770,9 +778,17 @@ static struct sk_buff 
*netvsc_alloc_recv_skb(struct net_device *net, /* skb is already created with CHECKSUM_NONE */ skb_checksum_none_assert(skb); - /* - * In Linux, the IP checksum is always checked. - * Do L4 checksum offload if enabled and present. + /* Incoming packets may have IP header checksum verified by the host. + * They may not have IP header checksum computed after coalescing. + * We compute it here if the flags are set, because on Linux, the IP + * checksum is always checked. + */ + if (csum_info && csum_info->receive.ip_checksum_value_invalid && + csum_info->receive.ip_checksum_succeeded && + skb->protocol == htons(ETH_P_IP)) + netvsc_comp_ipcsum(skb); + + /* Do L4 checksum offload if enabled and present. */ if (csum_info && (net->features & NETIF_F_RXCSUM)) { if (csum_info->receive.tcp_checksum_succeeded || diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 7cdac77d0c68..07e41c42bcf5 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev, if (!data) return 0; + if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; if (data[IFLA_IPVLAN_MODE]) { u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); @@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct ipvl_dev *tmp = netdev_priv(phy_dev); phy_dev = tmp->phy_dev; + if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; } else if (!netif_is_ipvlan_port(phy_dev)) { /* Exit early if the underlying link is invalid or busy */ if (phy_dev->type != ARPHRD_ETHER || diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index da6a67d47ce9..56fa3606cb9c 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/phy.h> +#include <linux/delay.h> #include <dt-bindings/net/ti-dp83867.h> @@ -325,6 +326,8 @@ static int dp83867_phy_reset(struct phy_device *phydev) if (err < 0) return err; + usleep_range(10, 20); + return dp83867_config_init(phydev); } diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 82ab6ed3b74e..6bac602094bd 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -26,6 +26,8 @@ #include <linux/marvell_phy.h> #include <linux/phy.h> +#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0 + enum { MV_PCS_BASE_T = 0x0000, MV_PCS_BASE_R = 0x1000, @@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev) else reg = 0; + /* Make sure we clear unsupported 2.5G/5G advertising */ ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - MDIO_AN_10GBT_CTRL_ADV10G, reg); + MDIO_AN_10GBT_CTRL_ADV10G | + MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg); if (ret < 0) return ret; if (ret > 0) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 66b9cfe692fc..7368616286ae 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); - put_device(&bus->dev); return -EINVAL; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index b1f959935f50..b7df0295a3ca 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -344,6 +344,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static int 
ksz8061_config_init(struct phy_device *phydev) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); + if (ret) + return ret; + + return kszphy_config_init(phydev); +} + static int ksz9021_load_values_from_of(struct phy_device *phydev, const struct device_node *of_node, u16 reg, @@ -1040,7 +1051,7 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8061", .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, - .config_init = kszphy_config_init, + .config_init = ksz8061_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 938803237d7f..85987aac31c4 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -320,6 +320,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state * linkmode_zero(state->lp_advertising); state->interface = pl->link_config.interface; state->an_enabled = pl->link_config.an_enabled; + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + state->pause = MLO_PAUSE_NONE; + state->an_complete = 0; state->link = 1; return pl->ops->mac_link_state(ndev, state); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c6010fb1aa0f..cb4a23041a94 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = { .name = "RTL8366RB Gigabit Ethernet", .features = PHY_GBIT_FEATURES, .config_init = &rtl8366rb_config_init, + /* These interrupts are handled by the irq controller + * embedded inside the RTL8366RB, they get unmasked when the + * irq is requested and ACKed by reading the status register, + * which is done by the irqchip code. 
+ */ + .ack_interrupt = genphy_no_ack_interrupt, + .config_intr = genphy_no_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 74a8782313cf..bd6084e315de 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) u16 val = 0; int err; - err = priv->phy_drv->read_status(phydev); + if (priv->phy_drv->read_status) + err = priv->phy_drv->read_status(phydev); + else + err = genphy_read_status(phydev); if (err < 0) return err; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 958f1cf67282..6ce3f666d142 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1256,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); __team_compute_features(team); - __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); + __team_port_change_port_added(port, !!netif_oper_up(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); @@ -2915,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: - if (netif_carrier_ok(dev)) + if (netif_oper_up(dev)) team_port_change_check(port, true); break; case NETDEV_DOWN: diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fed298c0cb39..53f4f37b0ffd 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2167,9 +2167,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) } add_wait_queue(&tfile->wq.wait, &wait); - current->state = TASK_INTERRUPTIBLE; while (1) { + set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; @@ -2185,7 +2185,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) schedule(); } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->wq.wait, &wait); out: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..18af2f8eee96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..86c8c64fbb0f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -557,6 +557,7 @@ enum spd_duplex { /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 +#define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 @@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr 
*sa) return -ENODEV; } } else { - /* test for RTL8153-BND */ + /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); - if ((ocp_data & BND_MASK) == 0) { + if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 95909e262ba4..7c1430ed0244 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev) /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; + + dev->min_mtu = 0; + dev->max_mtu = 0; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 320edcac4699..6359053bd0c7 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) goto out_err; } - genlmsg_reply(skb, info); + res = genlmsg_reply(skb, info); break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0e6b43bb4678..a5ea3ba495a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = { .get_txpower = mt76x02_get_txpower, }; -static int mt76x0u_register_device(struct mt76x02_dev *dev) +static int mt76x0u_init_hardware(struct mt76x02_dev *dev) { - struct ieee80211_hw *hw = dev->mt76.hw; int err; - err = mt76u_alloc_queues(&dev->mt76); - if (err < 0) - goto out_err; - - err = mt76u_mcu_init_rx(&dev->mt76); - if (err < 0) - goto out_err; - mt76x0_chip_onoff(dev, true, true); - if (!mt76x02_wait_for_mac(&dev->mt76)) { - err = -ETIMEDOUT; - goto out_err; - } + + if (!mt76x02_wait_for_mac(&dev->mt76)) + return -ETIMEDOUT; err = mt76x0u_mcu_init(dev); if (err < 0) - goto out_err; + return err; mt76x0_init_usb_dma(dev); err = mt76x0_init_hardware(dev); if (err < 0) - goto out_err; + return err; mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + return 0; +} + +static int mt76x0u_register_device(struct mt76x02_dev *dev) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + int err; + + err = mt76u_alloc_queues(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76u_mcu_init_rx(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76x0u_init_hardware(dev); + if (err < 0) + goto out_err; + err = mt76x0_register_device(dev); if (err < 0) goto out_err; @@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, mt76u_stop_queues(&dev->mt76); mt76x0u_mac_stop(dev); + clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); usb_kill_urb(usb->mcu.res.urb); return 0; @@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - ret = mt76x0_init_hardware(dev); + ret = mt76x0u_init_hardware(dev); if (ret) goto err; diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 0ccb021f1e78..10d580c3dea3 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -454,6 +454,8 @@ void xenvif_init_hash(struct 
xenvif *vif) if (xenvif_hash_cache_size == 0) return; + BUG_ON(vif->hash.cache.count); + spin_lock_init(&vif->hash.cache.lock); INIT_LIST_HEAD(&vif->hash.cache.list); } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 182d6770f102..6da12518e693 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, { struct xenvif *vif = netdev_priv(dev); unsigned int size = vif->hash.size; + unsigned int num_queues; + + /* If queues are not set up internally - always return 0 + * as the packet going to be dropped anyway */ + num_queues = READ_ONCE(vif->num_queues); + if (num_queues < 1) + return 0; if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) return fallback(dev, skb, NULL) % dev->real_num_tx_queues; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 80aae3a32c2a..f09948b009dd 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s skb_frag_size_set(&frags[i], len); } - /* Copied all the bits from the frag list -- free it. */ - skb_frag_list_init(skb); - xenvif_skb_zerocopy_prepare(queue, nskb); - kfree_skb(nskb); - /* Release all the original (foreign) frags. */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) skb_frag_unref(skb, f); @@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) xenvif_fill_frags(queue, skb); if (unlikely(skb_has_frag_list(skb))) { + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + xenvif_skb_zerocopy_prepare(queue, nskb); if (xenvif_handle_frag_list(queue, skb)) { if (net_ratelimit()) netdev_err(queue->vif->dev, @@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) kfree_skb(skb); continue; } + /* Copied all the bits from the frag list -- free it. 
*/ + skb_frag_list_init(skb); + kfree_skb(nskb); } skb->dev = queue->vif->dev; diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index c69ca95b1ad5..0f140a802137 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = { static const char * const sdxc_a_groups[] = { "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", - "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" + "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" }; static const char * const pcm_a_groups[] = { diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c index 7aae52a09ff0..4ffd56ff809e 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c @@ -79,7 +79,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ - .tile = NORTH, \ + .tile = SOUTH, \ .mux_bit = -1, \ .pull_bit = pull, \ .drv_bit = drv, \ diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 28f55248eb90..753a6a1b30c3 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -11,7 +11,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> -#include <linux/i2c.h> #include <linux/of.h> #include <linux/regulator/of_regulator.h> #include <linux/platform_device.h> @@ -22,12 +21,7 @@ struct pm8607_regulator_info { struct regulator_desc desc; - struct pm860x_chip *chip; - struct regulator_dev *regulator; - struct i2c_client *i2c; - struct i2c_client *i2c_8606; - unsigned int *vol_table; unsigned int *vol_suspend; int slope_double; @@ -210,13 +204,15 @@ static const unsigned int LDO14_suspend_table[] = { static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); - int ret = -EINVAL; + int ret; + + ret = regulator_list_voltage_table(rdev, index); + if (ret < 0) + return ret; + + if (info->slope_double) + ret <<= 1; - if (info->vol_table && (index < rdev->desc->n_voltages)) { - ret = info->vol_table[index]; - if (info->slope_double) - ret <<= 1; - } return ret; } @@ -257,6 +253,7 @@ static const struct regulator_ops pm8606_preg_ops = { .type = REGULATOR_VOLTAGE, \ .id = PM8607_ID_##vreg, \ .owner = THIS_MODULE, \ + .volt_table = vreg##_table, \ .n_voltages = ARRAY_SIZE(vreg##_table), \ .vsel_reg = PM8607_##vreg, \ .vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \ @@ -266,7 +263,6 @@ static const struct regulator_ops pm8606_preg_ops = { .enable_mask = 1 << (ebit), \ }, \ .slope_double = (0), \ - .vol_table = (unsigned int *)&vreg##_table, \ .vol_suspend = (unsigned int *)&vreg##_suspend_table, \ } @@ -278,6 +274,7 @@ static const struct regulator_ops pm8606_preg_ops = { .type = REGULATOR_VOLTAGE, \ .id = PM8607_ID_LDO##_id, \ .owner = THIS_MODULE, \ + .volt_table = LDO##_id##_table, \ .n_voltages = ARRAY_SIZE(LDO##_id##_table), \ .vsel_reg = PM8607_##vreg, \ .vsel_mask = (ARRAY_SIZE(LDO##_id##_table) - 1) << (shift), \ @@ -285,7 +282,6 @@ static const struct regulator_ops pm8606_preg_ops = { .enable_mask = 1 << (ebit), \ }, \ .slope_double = (0), \ - .vol_table = (unsigned int *)&LDO##_id##_table, \ .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \ } @@ -349,6 +345,7 @@ static int pm8607_regulator_probe(struct platform_device *pdev) struct pm8607_regulator_info *info = NULL; struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev); struct regulator_config config = { }; + 
struct regulator_dev *rdev; struct resource *res; int i; @@ -371,13 +368,9 @@ static int pm8607_regulator_probe(struct platform_device *pdev) /* i is used to check regulator ID */ i = -1; } - info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; - info->i2c_8606 = (chip->id == CHIP_PM8607) ? chip->companion : - chip->client; - info->chip = chip; /* check DVC ramp slope double */ - if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double) + if ((i == PM8607_ID_BUCK3) && chip->buck3_double) info->slope_double = 1; config.dev = &pdev->dev; @@ -392,12 +385,11 @@ static int pm8607_regulator_probe(struct platform_device *pdev) else config.regmap = chip->regmap_companion; - info->regulator = devm_regulator_register(&pdev->dev, &info->desc, - &config); - if (IS_ERR(info->regulator)) { + rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); + if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register regulator %s\n", info->desc.name); - return PTR_ERR(info->regulator); + return PTR_ERR(rdev); } platform_set_drvdata(pdev, info); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index ee60a222f5eb..b7f249ee5e68 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -180,6 +180,17 @@ config REGULATOR_BCM590XX BCM590xx PMUs. This will enable support for the software controllable LDO/Switching regulators. +config REGULATOR_BD70528 + tristate "ROHM BD70528 Power Regulator" + depends on MFD_ROHM_BD70528 + help + This driver supports voltage regulators on ROHM BD70528 PMIC. + This will enable support for the software controllable buck + and LDO regulators. + + This driver can also be built as a module. If so, the module + will be called bd70528-regulator. + config REGULATOR_BD718XX tristate "ROHM BD71837 Power Regulator" depends on MFD_ROHM_BD718XX @@ -457,6 +468,14 @@ config REGULATOR_MAX77620 chip to control Step-Down DC-DC and LDOs. Say Y here to enable the regulator driver. +config REGULATOR_MAX77650 + tristate "Maxim MAX77650/77651 regulator support" + depends on MFD_MAX77650 + help + Regulator driver for MAX77650/77651 PMIC from Maxim + Semiconductor. This device has a SIMO with three independent + power rails and an LDO. + config REGULATOR_MAX8649 tristate "Maxim 8649 voltage regulator" depends on I2C @@ -484,7 +503,7 @@ config REGULATOR_MAX8925 tristate "Maxim MAX8925 Power Management IC" depends on MFD_MAX8925 help - Say y here to support the voltage regulaltor of Maxim MAX8925 PMIC. + Say y here to support the voltage regulator of Maxim MAX8925 PMIC. config REGULATOR_MAX8952 tristate "Maxim MAX8952 Power Management IC" @@ -501,7 +520,7 @@ config REGULATOR_MAX8973 select REGMAP_I2C help The MAXIM MAX8973 high-efficiency. three phase, DC-DC step-down - switching regulator delievers up to 9A of output current. Each + switching regulator delivers up to 9A of output current. Each phase operates at a 2MHz fixed frequency with a 120 deg shift from the adjacent phase, allowing the use of small magnetic component. @@ -646,7 +665,7 @@ config REGULATOR_PCF50633 tristate "NXP PCF50633 regulator driver" depends on MFD_PCF50633 help - Say Y here to support the voltage regulators and convertors + Say Y here to support the voltage regulators and converters on PCF50633 config REGULATOR_PFUZE100 @@ -924,7 +943,7 @@ config REGULATOR_TPS65132 select REGMAP_I2C help This driver supports TPS65132 single inductor - dual output - power supply specifcally designed for display panels. + power supply specifically designed for display panels. 
config REGULATOR_TPS65217 tristate "TI TPS65217 Power regulators" diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index b12e1c9b2118..1169f8a27d91 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o +obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o @@ -60,6 +61,7 @@ obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o +obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c index 21e20483bd91..e0239cf3f56d 100644 --- a/drivers/regulator/act8865-regulator.c +++ b/drivers/regulator/act8865-regulator.c @@ -131,7 +131,7 @@ * ACT8865 voltage number */ #define ACT8865_VOLTAGE_NUM 64 -#define ACT8600_SUDCDC_VOLTAGE_NUM 255 +#define ACT8600_SUDCDC_VOLTAGE_NUM 256 struct act8865 { struct regmap *regmap; @@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = { REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0), REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000), REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000), - REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000), + REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000), + REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0), }; static struct regulator_ops act8865_ops = { diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c index 603db77723b6..caa61d306a69 100644 --- a/drivers/regulator/act8945a-regulator.c +++ b/drivers/regulator/act8945a-regulator.c @@ -87,7 +87,8 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = { static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable) { struct regmap *regmap = rdev->regmap; - int id = rdev->desc->id, reg, val; + int id = rdev_get_id(rdev); + int reg, val; switch (id) { case ACT8945A_ID_DCDC1: @@ -159,7 +160,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev); struct regmap *regmap = rdev->regmap; - int id = rdev->desc->id; + int id = rdev_get_id(rdev); int reg, ret, val = 0; switch (id) { @@ -190,11 +191,11 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) switch (mode) { case REGULATOR_MODE_STANDBY: - if (rdev->desc->id > ACT8945A_ID_DCDC3) + if (id > ACT8945A_ID_DCDC3) val = BIT(5); break; case REGULATOR_MODE_NORMAL: - if (rdev->desc->id <= ACT8945A_ID_DCDC3) + if (id <= ACT8945A_ID_DCDC3) val = BIT(5); break; default: @@ -213,7 +214,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) static unsigned int act8945a_get_mode(struct regulator_dev *rdev) { struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev); - int id = rdev->desc->id; + int id = rdev_get_id(rdev); if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX) return -EINVAL; diff --git 
a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index b9a93049e41e..bf3ab405eed1 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c @@ -40,35 +40,10 @@ struct arizona_ldo1 { struct gpio_desc *ena_gpiod; }; -static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev, - unsigned int selector) -{ - if (selector >= rdev->desc->n_voltages) - return -EINVAL; - - if (selector == rdev->desc->n_voltages - 1) - return 1800000; - else - return rdev->desc->min_uV + (rdev->desc->uV_step * selector); -} - -static int arizona_ldo1_hc_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - int sel; - - sel = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); - if (sel >= rdev->desc->n_voltages) - sel = rdev->desc->n_voltages - 1; - - return sel; -} - static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) { - struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev); - struct regmap *regmap = ldo->regmap; + struct regmap *regmap = rdev_get_regmap(rdev); unsigned int val; int ret; @@ -85,16 +60,12 @@ static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev, if (val) return 0; - val = sel << ARIZONA_LDO1_VSEL_SHIFT; - - return regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_1, - ARIZONA_LDO1_VSEL_MASK, val); + return regulator_set_voltage_sel_regmap(rdev, sel); } static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev) { - struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev); - struct regmap *regmap = ldo->regmap; + struct regmap *regmap = rdev_get_regmap(rdev); unsigned int val; int ret; @@ -105,32 +76,35 @@ static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev) if (val & ARIZONA_LDO1_HI_PWR) return rdev->desc->n_voltages - 1; - ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_1, &val); - if (ret != 0) - return ret; - - return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT; + return regulator_get_voltage_sel_regmap(rdev); } static const struct regulator_ops arizona_ldo1_hc_ops = { - .list_voltage = arizona_ldo1_hc_list_voltage, - .map_voltage = arizona_ldo1_hc_map_voltage, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .get_voltage_sel = arizona_ldo1_hc_get_voltage_sel, .set_voltage_sel = arizona_ldo1_hc_set_voltage_sel, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, }; +static const struct regulator_linear_range arizona_ldo1_hc_ranges[] = { + REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000), + REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0), +}; + static const struct regulator_desc arizona_ldo1_hc = { .name = "LDO1", .supply_name = "LDOVDD", .type = REGULATOR_VOLTAGE, .ops = &arizona_ldo1_hc_ops, + .vsel_reg = ARIZONA_LDO1_CONTROL_1, + .vsel_mask = ARIZONA_LDO1_VSEL_MASK, .bypass_reg = ARIZONA_LDO1_CONTROL_1, .bypass_mask = ARIZONA_LDO1_BYPASS, - .min_uV = 900000, - .uV_step = 50000, + .linear_ranges = arizona_ldo1_hc_ranges, + .n_linear_ranges = ARRAY_SIZE(arizona_ldo1_hc_ranges), .n_voltages = 8, .enable_time = 1500, .ramp_delay = 24000, diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index 66337e12719b..e5fed289b52d 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c @@ -886,7 +886,7 @@ static int as3722_regulator_probe(struct platform_device *pdev) as3722_regs->desc[id].min_uV = 410000; } else { as3722_regs->desc[id].n_voltages = - AS3722_SD0_VSEL_MAX + 1, + 
AS3722_SD0_VSEL_MAX + 1; as3722_regs->desc[id].min_uV = 610000; } as3722_regs->desc[id].uV_step = 10000; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 48af859fd053..fba8f58ab769 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -367,13 +367,12 @@ static const int axp209_dcdc2_ldo3_slew_rates[] = { static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) { struct axp20x_dev *axp20x = rdev_get_drvdata(rdev); - const struct regulator_desc *desc = rdev->desc; + const struct regulator_desc *desc; u8 reg, mask, enable, cfg = 0xff; const int *slew_rates; int rate_count = 0; - if (!rdev) - return -EINVAL; + desc = rdev->desc; switch (axp20x->variant) { case AXP209_ID: @@ -436,11 +435,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev) { struct axp20x_dev *axp20x = rdev_get_drvdata(rdev); - const struct regulator_desc *desc = rdev->desc; + const struct regulator_desc *desc; if (!rdev) return -EINVAL; + desc = rdev->desc; + switch (axp20x->variant) { case AXP209_ID: if ((desc->id == AXP20X_LDO3) && @@ -573,7 +574,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK), AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, - AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT, + AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK, AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK), AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK, @@ -719,7 +720,7 @@ static const struct regulator_desc axp803_regulators[] = { AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK), AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100, - AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT, + AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK), AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100, AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK, @@ -729,7 +730,7 @@ static const struct regulator_desc axp803_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK), AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES, - AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT, + AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK), AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100, AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK, @@ -744,7 +745,7 @@ static const struct regulator_desc axp803_regulators[] = { AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50, - AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT, + AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 1450, 50, AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK, @@ -791,7 +792,7 @@ static const struct regulator_desc axp806_regulators[] = { AXP806_DCDCA_V_CTRL, AXP806_DCDCA_V_CTRL_MASK, AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCA_MASK), AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50, - AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL, + AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL_MASK, AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCB_MASK), AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc", axp806_dcdca_ranges, 
AXP806_DCDCA_NUM_VOLTAGES, @@ -817,7 +818,7 @@ static const struct regulator_desc axp806_regulators[] = { AXP806_BLDO1_V_CTRL, AXP806_BLDO1_V_CTRL_MASK, AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO1_MASK), AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100, - AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL, + AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL_MASK, AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO2_MASK), AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100, AXP806_BLDO3_V_CTRL, AXP806_BLDO3_V_CTRL_MASK, @@ -952,7 +953,7 @@ static const struct regulator_desc axp813_regulators[] = { AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK), AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100, - AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT, + AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK), AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100, AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK, @@ -962,7 +963,7 @@ static const struct regulator_desc axp813_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK), AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES, - AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT, + AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK), AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100, AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK, @@ -977,7 +978,7 @@ static const struct regulator_desc axp813_regulators[] = { AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50, - AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT, + AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), /* to do / check ... 
*/ AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50, diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c index 92d6d7b10cf7..e49c0a7d5dd5 100644 --- a/drivers/regulator/bcm590xx-regulator.c +++ b/drivers/regulator/bcm590xx-regulator.c @@ -242,8 +242,12 @@ static int bcm590xx_get_enable_register(int id) case BCM590XX_REG_SDSR2: reg = BCM590XX_SDSR2PMCTRL1; break; + case BCM590XX_REG_VSR: + reg = BCM590XX_VSRPMCTRL1; + break; case BCM590XX_REG_VBUS: reg = BCM590XX_OTG_CTRL; + break; } diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c new file mode 100644 index 000000000000..30e3ed430a8a --- /dev/null +++ b/drivers/regulator/bd70528-regulator.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 ROHM Semiconductors +// bd70528-regulator.c ROHM BD70528MWV regulator driver + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/rohm-bd70528.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/slab.h> + +#define BUCK_RAMPRATE_250MV 0 +#define BUCK_RAMPRATE_125MV 1 +#define BUCK_RAMP_MAX 250 + +static const struct regulator_linear_range bd70528_buck1_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000), + REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000), +}; +static const struct regulator_linear_range bd70528_buck2_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000), + REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000), + REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000), +}; +static const struct regulator_linear_range bd70528_buck3_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000), + REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0), +}; + +/* All LDOs have same voltage ranges */ +static const struct regulator_linear_range bd70528_ldo_volts[] = { + REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000), + REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000), + REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x19, 0x1f, 0), +}; + +/* Also both LEDs support same voltages */ +static const unsigned int led_volts[] = { + 20000, 30000 +}; + +static int bd70528_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + if (ramp_delay > 0 && ramp_delay <= BUCK_RAMP_MAX) { + unsigned int ramp_value = BUCK_RAMPRATE_250MV; + + if (ramp_delay <= 125) + ramp_value = BUCK_RAMPRATE_125MV; + + return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, + BD70528_MASK_BUCK_RAMP, + ramp_value << BD70528_SIFT_BUCK_RAMP); + } + dev_err(&rdev->dev, "%s: ramp_delay: %d not supported\n", + rdev->desc->name, ramp_delay); + return -EINVAL; +} + +static int bd70528_led_set_voltage_sel(struct regulator_dev *rdev, + unsigned int sel) +{ + int ret; + + ret = regulator_is_enabled_regmap(rdev); + if (ret < 0) + return ret; + + if (ret == 0) + return regulator_set_voltage_sel_regmap(rdev, sel); + + dev_err(&rdev->dev, + "LED voltage change not allowed when led is enabled\n"); + + return -EBUSY; +} + +static const struct regulator_ops bd70528_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = 
regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = bd70528_set_ramp_delay, +}; + +static const struct regulator_ops bd70528_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = bd70528_set_ramp_delay, +}; + +static const struct regulator_ops bd70528_led_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .set_voltage_sel = bd70528_led_set_voltage_sel, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static const struct regulator_desc bd70528_desc[] = { + { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_BUCK1, + .ops = &bd70528_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_buck1_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_buck1_volts), + .n_voltages = BD70528_BUCK_VOLTS, + .enable_reg = BD70528_REG_BUCK1_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_BUCK1_VOLT, + .vsel_mask = BD70528_MASK_BUCK_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_BUCK2, + .ops = &bd70528_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_buck2_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_buck2_volts), + .n_voltages = BD70528_BUCK_VOLTS, + .enable_reg = BD70528_REG_BUCK2_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_BUCK2_VOLT, + .vsel_mask = BD70528_MASK_BUCK_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_BUCK3, + .ops = &bd70528_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_buck3_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_buck3_volts), + .n_voltages = BD70528_BUCK_VOLTS, + .enable_reg = BD70528_REG_BUCK3_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_BUCK3_VOLT, + .vsel_mask = BD70528_MASK_BUCK_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LDO1, + .ops = &bd70528_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts), + .n_voltages = BD70528_LDO_VOLTS, + .enable_reg = BD70528_REG_LDO1_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_LDO1_VOLT, + .vsel_mask = BD70528_MASK_LDO_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LDO2, + .ops = &bd70528_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts), + .n_voltages = BD70528_LDO_VOLTS, + .enable_reg = BD70528_REG_LDO2_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_LDO2_VOLT, + .vsel_mask = BD70528_MASK_LDO_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo3", + .of_match = 
of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LDO3, + .ops = &bd70528_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts), + .n_voltages = BD70528_LDO_VOLTS, + .enable_reg = BD70528_REG_LDO3_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_LDO3_VOLT, + .vsel_mask = BD70528_MASK_LDO_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo_led1", + .of_match = of_match_ptr("LDO_LED1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LED1, + .ops = &bd70528_led_ops, + .type = REGULATOR_VOLTAGE, + .volt_table = &led_volts[0], + .n_voltages = ARRAY_SIZE(led_volts), + .enable_reg = BD70528_REG_LED_EN, + .enable_mask = BD70528_MASK_LED1_EN, + .vsel_reg = BD70528_REG_LED_VOLT, + .vsel_mask = BD70528_MASK_LED1_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo_led2", + .of_match = of_match_ptr("LDO_LED2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LED2, + .ops = &bd70528_led_ops, + .type = REGULATOR_VOLTAGE, + .volt_table = &led_volts[0], + .n_voltages = ARRAY_SIZE(led_volts), + .enable_reg = BD70528_REG_LED_EN, + .enable_mask = BD70528_MASK_LED2_EN, + .vsel_reg = BD70528_REG_LED_VOLT, + .vsel_mask = BD70528_MASK_LED2_VOLT, + .owner = THIS_MODULE, + }, + +}; + +static int bd70528_probe(struct platform_device *pdev) +{ + struct rohm_regmap_dev *bd70528; + int i; + struct regulator_config config = { + .dev = pdev->dev.parent, + }; + + bd70528 = dev_get_drvdata(pdev->dev.parent); + if (!bd70528) { + dev_err(&pdev->dev, "No MFD driver data\n"); + return -EINVAL; + } + + config.regmap = bd70528->regmap; + + for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) { + struct regulator_dev *rdev; + + rdev = devm_regulator_register(&pdev->dev, &bd70528_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + bd70528_desc[i].name); + return PTR_ERR(rdev); + } + } + return 0; +} + +static struct platform_driver bd70528_regulator = { + .driver = { + .name = "bd70528-pmic" + }, + .probe = bd70528_probe, +}; + +module_platform_driver(bd70528_regulator); + +MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); +MODULE_DESCRIPTION("BD70528 voltage regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index b8dcdc21dc22..b2191be49670 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -79,7 +79,7 @@ static int bd718xx_set_voltage_sel_pickable_restricted( return regulator_set_voltage_sel_pickable_regmap(rdev, sel); } -static struct regulator_ops bd718xx_pickable_range_ldo_ops = { +static const struct regulator_ops bd718xx_pickable_range_ldo_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -88,7 +88,7 @@ static struct regulator_ops bd718xx_pickable_range_ldo_ops = { .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap, }; -static struct regulator_ops bd718xx_pickable_range_buck_ops = { +static const struct regulator_ops bd718xx_pickable_range_buck_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -98,7 +98,7 @@ static struct regulator_ops bd718xx_pickable_range_buck_ops = { .set_voltage_time_sel = regulator_set_voltage_time_sel, }; -static struct regulator_ops bd718xx_ldo_regulator_ops = { +static const 
struct regulator_ops bd718xx_ldo_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -107,7 +107,7 @@ static struct regulator_ops bd718xx_ldo_regulator_ops = { .get_voltage_sel = regulator_get_voltage_sel_regmap, }; -static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = { +static const struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -116,7 +116,7 @@ static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = { .get_voltage_sel = regulator_get_voltage_sel_regmap, }; -static struct regulator_ops bd718xx_buck_regulator_ops = { +static const struct regulator_ops bd718xx_buck_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -126,7 +126,7 @@ static struct regulator_ops bd718xx_buck_regulator_ops = { .set_voltage_time_sel = regulator_set_voltage_time_sel, }; -static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = { +static const struct regulator_ops bd718xx_buck_regulator_nolinear_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -137,7 +137,7 @@ static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = { .set_voltage_time_sel = regulator_set_voltage_time_sel, }; -static struct regulator_ops bd718xx_dvs_buck_regulator_ops = { +static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -350,6 +350,135 @@ static const struct reg_init bd71837_ldo6_inits[] = { }, }; +#define NUM_DVS_BUCKS 4 + +struct of_dvs_setting { + const char *prop; + unsigned int reg; +}; + +static int set_dvs_levels(const struct of_dvs_setting *dvs, + struct device_node *np, + const struct regulator_desc *desc, + struct regmap *regmap) +{ + int ret, i; + unsigned int uv; + + ret = of_property_read_u32(np, dvs->prop, &uv); + if (ret) { + if (ret != -EINVAL) + return ret; + return 0; + } + + for (i = 0; i < desc->n_voltages; i++) { + ret = regulator_desc_list_voltage_linear_range(desc, i); + if (ret < 0) + continue; + if (ret == uv) { + i <<= ffs(desc->vsel_mask) - 1; + ret = regmap_update_bits(regmap, dvs->reg, + DVS_BUCK_RUN_MASK, i); + break; + } + } + return ret; +} + +static int buck4_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD71837_REG_BUCK4_VOLT_RUN, + }, + }; + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} +static int buck3_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD71837_REG_BUCK3_VOLT_RUN, + }, + }; + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} + +static int buck2_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = 
"rohm,dvs-run-voltage", + .reg = BD718XX_REG_BUCK2_VOLT_RUN, + }, + { + .prop = "rohm,dvs-idle-voltage", + .reg = BD718XX_REG_BUCK2_VOLT_IDLE, + }, + }; + + + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} + +static int buck1_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD718XX_REG_BUCK1_VOLT_RUN, + }, + { + .prop = "rohm,dvs-idle-voltage", + .reg = BD718XX_REG_BUCK1_VOLT_IDLE, + }, + { + .prop = "rohm,dvs-suspend-voltage", + .reg = BD718XX_REG_BUCK1_VOLT_SUSP, + }, + }; + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} + static const struct bd718xx_regulator_data bd71847_regulators[] = { { .desc = { @@ -368,6 +497,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .enable_reg = BD718XX_REG_BUCK1_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck1_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK1_CTRL, @@ -391,6 +521,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .enable_reg = BD718XX_REG_BUCK2_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck2_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK2_CTRL, @@ -662,6 +793,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD718XX_REG_BUCK1_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck1_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK1_CTRL, @@ -685,6 +817,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD718XX_REG_BUCK2_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck2_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK2_CTRL, @@ -708,6 +841,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD71837_REG_BUCK3_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck3_set_hw_dvs_levels, }, .init = { .reg = BD71837_REG_BUCK3_CTRL, @@ -731,6 +865,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD71837_REG_BUCK4_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck4_set_hw_dvs_levels, }, .init = { .reg = BD71837_REG_BUCK4_CTRL, @@ -1029,6 +1164,7 @@ static int bd718xx_probe(struct platform_device *pdev) }; int i, j, err; + bool use_snvs; mfd = dev_get_drvdata(pdev->dev.parent); if (!mfd) { @@ -1055,27 +1191,28 @@ static int bd718xx_probe(struct platform_device *pdev) BD718XX_REG_REGLOCK); } - /* At poweroff transition PMIC HW disables EN bit for regulators but - * leaves SEL bit untouched. So if state transition from POWEROFF - * is done to SNVS - then all power rails controlled by SW (having - * SEL bit set) stay disabled as EN is cleared. This may result boot - * failure if any crucial systems are powered by these rails. 
- * + use_snvs = of_property_read_bool(pdev->dev.parent->of_node, + "rohm,reset-snvs-powered"); + + /* * Change the next stage from poweroff to be READY instead of SNVS * for all reset types because OTP loading at READY will clear SEL * bit allowing HW defaults for power rails to be used */ - err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1, - BD718XX_ON_REQ_POWEROFF_MASK | - BD718XX_SWRESET_POWEROFF_MASK | - BD718XX_WDOG_POWEROFF_MASK | - BD718XX_KEY_L_POWEROFF_MASK, - BD718XX_POWOFF_TO_RDY); - if (err) { - dev_err(&pdev->dev, "Failed to change reset target\n"); - goto err; - } else { - dev_dbg(&pdev->dev, "Changed all resets from SVNS to READY\n"); + if (!use_snvs) { + err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1, + BD718XX_ON_REQ_POWEROFF_MASK | + BD718XX_SWRESET_POWEROFF_MASK | + BD718XX_WDOG_POWEROFF_MASK | + BD718XX_KEY_L_POWEROFF_MASK, + BD718XX_POWOFF_TO_RDY); + if (err) { + dev_err(&pdev->dev, "Failed to change reset target\n"); + goto err; + } else { + dev_dbg(&pdev->dev, + "Changed all resets from SVNS to READY\n"); + } } for (i = 0; i < pmic_regulators[mfd->chip_type].r_amount; i++) { @@ -1098,19 +1235,33 @@ static int bd718xx_probe(struct platform_device *pdev) err = PTR_ERR(rdev); goto err; } - /* Regulator register gets the regulator constraints and + + /* + * Regulator register gets the regulator constraints and * applies them (set_machine_constraints). This should have * turned the control register(s) to correct values and we * can now switch the control from PMIC state machine to the * register interface + * + * At poweroff transition PMIC HW disables EN bit for + * regulators but leaves SEL bit untouched. So if state + * transition from POWEROFF is done to SNVS - then all power + * rails controlled by SW (having SEL bit set) stay disabled + * as EN is cleared. This will result boot failure if any + * crucial systems are powered by these rails. 
We don't + * enable SW control for crucial regulators if snvs state is + * used */ - err = regmap_update_bits(mfd->regmap, r->init.reg, - r->init.mask, r->init.val); - if (err) { - dev_err(&pdev->dev, - "Failed to write BUCK/LDO SEL bit for (%s)\n", - desc->name); - goto err; + if (!use_snvs || !rdev->constraints->always_on || + !rdev->constraints->boot_on) { + err = regmap_update_bits(mfd->regmap, r->init.reg, + r->init.mask, r->init.val); + if (err) { + dev_err(&pdev->dev, + "Failed to take control for (%s)\n", + desc->name); + goto err; + } } for (j = 0; j < r->additional_init_amnt; j++) { err = regmap_update_bits(mfd->regmap, diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c index e12dd1f750f3..e690c2ce5b3c 100644 --- a/drivers/regulator/bd9571mwv-regulator.c +++ b/drivers/regulator/bd9571mwv-regulator.c @@ -100,7 +100,7 @@ static int bd9571mwv_reg_set_voltage_sel_regmap(struct regulator_dev *rdev, } /* Operations permitted on AVS voltage regulator */ -static struct regulator_ops avs_ops = { +static const struct regulator_ops avs_ops = { .set_voltage_sel = bd9571mwv_avs_set_voltage_sel_regmap, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = bd9571mwv_avs_get_voltage_sel_regmap, @@ -108,7 +108,7 @@ static struct regulator_ops avs_ops = { }; /* Operations permitted on voltage regulators */ -static struct regulator_ops reg_ops = { +static const struct regulator_ops reg_ops = { .set_voltage_sel = bd9571mwv_reg_set_voltage_sel_regmap, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -116,13 +116,13 @@ static struct regulator_ops reg_ops = { }; /* Operations permitted on voltage monitors */ -static struct regulator_ops vid_ops = { +static const struct regulator_ops vid_ops = { .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, }; -static struct regulator_desc regulators[] = { +static const struct regulator_desc regulators[] = { BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f, 0x80, 600000, 10000, 0x3c), BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf, diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b9d7b45c7295..68473d0cc57e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -23,7 +23,6 @@ #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/delay.h> -#include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/regmap.h> @@ -82,7 +81,6 @@ struct regulator_enable_gpio { struct gpio_desc *gpiod; u32 enable_count; /* a number of enabled shared GPIO */ u32 request_count; /* a number of requested shared GPIO */ - unsigned int ena_gpio_invert:1; }; /* @@ -145,14 +143,6 @@ static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops) return false; } -static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev) -{ - if (rdev && rdev->supply) - return rdev->supply->rdev; - - return NULL; -} - /** * regulator_lock_nested - lock a single regulator * @rdev: regulator source @@ -326,7 +316,7 @@ err_unlock: * @rdev: regulator source * @ww_ctx: w/w mutex acquire context * - * Unlock all regulators related with rdev by coupling or suppling. + * Unlock all regulators related with rdev by coupling or supplying. 
*/ static void regulator_unlock_dependent(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) @@ -341,7 +331,7 @@ static void regulator_unlock_dependent(struct regulator_dev *rdev, * @ww_ctx: w/w mutex acquire context * * This function as a wrapper on regulator_lock_recursive(), which locks - * all regulators related with rdev by coupling or suppling. + * all regulators related with rdev by coupling or supplying. */ static void regulator_lock_dependent(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) @@ -924,14 +914,14 @@ static int drms_uA_update(struct regulator_dev *rdev) int current_uA = 0, output_uV, input_uV, err; unsigned int mode; - lockdep_assert_held_once(&rdev->mutex.base); - /* * first check to see if we can set modes at all, otherwise just * tell the consumer everything is OK. */ - if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) + if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) { + rdev_dbg(rdev, "DRMS operation not allowed\n"); return 0; + } if (!rdev->desc->ops->get_optimum_mode && !rdev->desc->ops->set_load) @@ -1003,7 +993,7 @@ static int suspend_set_state(struct regulator_dev *rdev, if (rstate == NULL) return 0; - /* If we have no suspend mode configration don't set anything; + /* If we have no suspend mode configuration don't set anything; * only warn if the driver implements set_suspend_voltage or * set_suspend_mode callback. */ @@ -1131,7 +1121,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, int current_uV = _regulator_get_voltage(rdev); if (current_uV == -ENOTRECOVERABLE) { - /* This regulator can't be read and must be initted */ + /* This regulator can't be read and must be initialized */ rdev_info(rdev, "Setting %d-%duV\n", rdev->constraints->min_uV, rdev->constraints->max_uV); @@ -1349,7 +1339,9 @@ static int set_machine_constraints(struct regulator_dev *rdev, * We'll only apply the initial system load if an * initial mode wasn't specified. */ + regulator_lock(rdev); drms_uA_update(rdev); + regulator_unlock(rdev); } if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable) @@ -1780,7 +1772,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) struct device *dev = rdev->dev.parent; int ret; - /* No supply to resovle? */ + /* No supply to resolve? 
*/ if (!rdev->supply_name) return 0; @@ -2058,15 +2050,7 @@ static void _regulator_put(struct regulator *regulator) debugfs_remove_recursive(regulator->debugfs); if (regulator->dev) { - int count = 0; - struct regulator *r; - - list_for_each_entry(r, &rdev->consumer_list, list) - if (r->dev == regulator->dev) - count++; - - if (count == 1) - device_link_remove(regulator->dev, &rdev->dev); + device_link_remove(regulator->dev, &rdev->dev); /* remove any sysfs entries */ sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); @@ -2237,38 +2221,21 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, { struct regulator_enable_gpio *pin; struct gpio_desc *gpiod; - int ret; - if (config->ena_gpiod) - gpiod = config->ena_gpiod; - else - gpiod = gpio_to_desc(config->ena_gpio); + gpiod = config->ena_gpiod; list_for_each_entry(pin, ®ulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { - rdev_dbg(rdev, "GPIO %d is already used\n", - config->ena_gpio); + rdev_dbg(rdev, "GPIO is already used\n"); goto update_ena_gpio_to_rdev; } } - if (!config->ena_gpiod) { - ret = gpio_request_one(config->ena_gpio, - GPIOF_DIR_OUT | config->ena_gpio_flags, - rdev_get_name(rdev)); - if (ret) - return ret; - } - pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); - if (pin == NULL) { - if (!config->ena_gpiod) - gpio_free(config->ena_gpio); + if (pin == NULL) return -ENOMEM; - } pin->gpiod = gpiod; - pin->ena_gpio_invert = config->ena_gpio_invert; list_add(&pin->list, ®ulator_ena_gpio_list); update_ena_gpio_to_rdev: @@ -2289,7 +2256,6 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) if (pin->gpiod == rdev->ena_pin->gpiod) { if (pin->request_count <= 1) { pin->request_count = 0; - gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); rdev->ena_pin = NULL; @@ -2319,8 +2285,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) if (enable) { /* Enable GPIO at initial use */ if (pin->enable_count == 0) - gpiod_set_value_cansleep(pin->gpiod, - !pin->ena_gpio_invert); + gpiod_set_value_cansleep(pin->gpiod, 1); pin->enable_count++; } else { @@ -2331,8 +2296,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) /* Disable GPIO if not used */ if (pin->enable_count <= 1) { - gpiod_set_value_cansleep(pin->gpiod, - pin->ena_gpio_invert); + gpiod_set_value_cansleep(pin->gpiod, 0); pin->enable_count = 0; } } @@ -2409,7 +2373,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev) * timer wrapping. * in case of multiple timer wrapping, either it can be * detected by out-of-range remaining, or it cannot be - * detected and we gets a panelty of + * detected and we get a penalty of * _regulator_enable_delay(). */ remaining = intended - start_jiffy; @@ -2809,7 +2773,7 @@ static void regulator_disable_work(struct work_struct *work) /** * regulator_disable_deferred - disable regulator output with delay * @regulator: regulator source - * @ms: miliseconds until the regulator is disabled + * @ms: milliseconds until the regulator is disabled * * Execute regulator_disable() on the regulator after a delay. This * is intended for use with devices that require some time to quiesce. @@ -4943,7 +4907,7 @@ regulator_register(const struct regulator_desc *regulator_desc, * device tree until we have handled it over to the core. 
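The core.c hunks above drop the legacy ena_gpio number and the ena_gpio_invert flag because a GPIO descriptor already carries the line's polarity, letting the core write plain logical 1/0. A hypothetical probe sketch of how a driver hands such a descriptor to the core under this scheme; the "enable" con_id, the fixed-voltage desc and all example_* names are assumptions, not taken from any in-tree driver.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: names and properties are assumptions. */
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

/* Fixed-voltage switch: the core handles enable via config.ena_gpiod */
static const struct regulator_ops example_gpio_reg_ops = { };

static const struct regulator_desc example_gpio_reg_desc = {
	.name		= "example-switch",
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.ops		= &example_gpio_reg_ops,
	.fixed_uV	= 1800000,
	.n_voltages	= 1,
};

static int example_reg_probe(struct platform_device *pdev)
{
	struct regulator_config config = { };
	struct regulator_dev *rdev;

	config.dev = &pdev->dev;

	/*
	 * The descriptor carries the line polarity, so no ena_gpio_invert
	 * equivalent is needed: GPIOD_OUT_LOW means "logically disabled"
	 * whatever the physical level is.
	 */
	config.ena_gpiod = gpiod_get_optional(&pdev->dev, "enable",
					      GPIOD_OUT_LOW);
	if (IS_ERR(config.ena_gpiod))
		return PTR_ERR(config.ena_gpiod);

	/*
	 * On successful registration the core takes over the descriptor
	 * (see the "regulator core took over the GPIO descriptor" comment
	 * in this patch); error unwinding is left out of this sketch.
	 */
	rdev = devm_regulator_register(&pdev->dev, &example_gpio_reg_desc,
				       &config);

	return PTR_ERR_OR_ZERO(rdev);
}

static struct platform_driver example_reg_driver = {
	.driver = {
		.name = "example-gpio-reg",
	},
	.probe = example_reg_probe,
};
module_platform_driver(example_reg_driver);

MODULE_LICENSE("GPL");
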
If the * config that was passed in to this function DOES NOT contain * a descriptor, and the config after this call DOES contain - * a descriptor, we definately got one from parsing the device + * a descriptor, we definitely got one from parsing the device * tree. */ if (!cfg->ena_gpiod && config->ena_gpiod) @@ -4975,15 +4939,13 @@ regulator_register(const struct regulator_desc *regulator_desc, goto clean; } - if (config->ena_gpiod || - ((config->ena_gpio || config->ena_gpio_initialized) && - gpio_is_valid(config->ena_gpio))) { + if (config->ena_gpiod) { mutex_lock(®ulator_list_mutex); ret = regulator_ena_gpio_request(rdev, config); mutex_unlock(®ulator_list_mutex); if (ret != 0) { - rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", - config->ena_gpio, ret); + rdev_err(rdev, "Failed to request enable GPIO: %d\n", + ret); goto clean; } /* The regulator core took over the GPIO descriptor */ @@ -5251,6 +5213,12 @@ struct device *rdev_get_dev(struct regulator_dev *rdev) } EXPORT_SYMBOL_GPL(rdev_get_dev); +struct regmap *rdev_get_regmap(struct regulator_dev *rdev) +{ + return rdev->regmap; +} +EXPORT_SYMBOL_GPL(rdev_get_regmap); + void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data) { return reg_init_data->driver_data; diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c index 2131457937b7..e7dab5c4d1d1 100644 --- a/drivers/regulator/cpcap-regulator.c +++ b/drivers/regulator/cpcap-regulator.c @@ -100,12 +100,11 @@ struct cpcap_regulator { struct regulator_desc rdesc; const u16 assign_reg; const u16 assign_mask; - const u16 vsel_shift; }; #define CPCAP_REG(_ID, reg, assignment_reg, assignment_mask, val_tbl, \ - mode_mask, volt_mask, volt_shft, \ - mode_val, off_val, volt_trans_time) { \ + mode_mask, volt_mask, mode_val, off_val, \ + volt_trans_time) { \ .rdesc = { \ .name = #_ID, \ .of_match = of_match_ptr(#_ID), \ @@ -127,7 +126,6 @@ struct cpcap_regulator { }, \ .assign_reg = (assignment_reg), \ .assign_mask = (assignment_mask), \ - .vsel_shift = (volt_shft), \ } struct cpcap_ddata { @@ -336,155 +334,155 @@ static const unsigned int vaudio_val_tbl[] = { 0, 2775000, }; * SW1 to SW4 and SW6 seems to be unused for mapphone. Note that VSIM and * VSIMCARD have a shared resource assignment bit. 
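The cpcap change below (and the arizona-ldo1 conversion earlier) can drop per-regulator shift fields because the core recovers the selector's bit position from vsel_mask alone, the same ffs()-based idiom used by set_dvs_levels() in the bd718x7 patch. A standalone sketch with a hypothetical mask:

/* Illustrative sketch only: EXAMPLE_VSEL_MASK is a hypothetical mask with the
 * selector field in bits [5:3] of a control register. */
#include <linux/bitops.h>

#define EXAMPLE_VSEL_MASK	0x38

static unsigned int example_sel_from_reg(unsigned int reg_val)
{
	/* ffs(0x38) - 1 == 3, i.e. the shift is recovered from the mask */
	return (reg_val & EXAMPLE_VSEL_MASK) >> (ffs(EXAMPLE_VSEL_MASK) - 1);
}

static unsigned int example_sel_to_reg(unsigned int sel)
{
	return (sel << (ffs(EXAMPLE_VSEL_MASK) - 1)) & EXAMPLE_VSEL_MASK;
}
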
*/ -static struct cpcap_regulator omap4_regulators[] = { +static const struct cpcap_regulator omap4_regulators[] = { CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW1_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW2_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW3_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW4_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW5_SEL, sw5_val_tbl, - 0x28, 0, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0), + 0x28, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0), CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW6_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VCAM_SEL, vcam_val_tbl, - 0x87, 0x30, 4, 0x3, 0, 420), + 0x87, 0x30, 0x3, 0, 420), CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VCSI_SEL, vcsi_val_tbl, - 0x47, 0x10, 4, 0x43, 0x41, 350), + 0x47, 0x10, 0x43, 0x41, 350), CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VDAC_SEL, vdac_val_tbl, - 0x87, 0x30, 4, 0x3, 0, 420), + 0x87, 0x30, 0x3, 0, 420), CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VDIG_SEL, vdig_val_tbl, - 0x87, 0x30, 4, 0x82, 0, 420), + 0x87, 0x30, 0x82, 0, 420), CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl, - 0x80, 0xf, 0, 0x80, 0, 420), + 0x80, 0xf, 0x80, 0, 420), CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl, - 0x17, 0, 0, 0, 0x12, 0), + 0x17, 0, 0, 0x12, 0), CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl, - 0x87, 0x38, 3, 0x82, 0, 420), + 0x87, 0x38, 0x82, 0, 420), CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VPLL_SEL, vpll_val_tbl, - 0x43, 0x18, 3, 0x2, 0, 420), + 0x43, 0x18, 0x2, 0, 420), CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF1_SEL, vrf1_val_tbl, - 0xac, 0x2, 1, 0x4, 0, 10), + 0xac, 0x2, 0x4, 0, 10), CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF2_SEL, vrf2_val_tbl, - 0x23, 0x8, 3, 0, 0, 10), + 0x23, 0x8, 0, 0, 10), CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl, - 0x23, 0x8, 3, 0, 0, 420), + 0x23, 0x8, 0, 0, 420), CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl, - 0x47, 0x10, 4, 0, 0, 420), + 0x47, 0x10, 0, 0, 420), CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl, - 0x20c, 0xc0, 6, 0x20c, 0, 420), + 0x20c, 0xc0, 0x20c, 0, 420), CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsim_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 420), + 0x23, 0x8, 0x3, 0, 420), CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsimcard_val_tbl, - 0x1e80, 0x8, 3, 0x1e00, 0, 420), + 0x1e80, 0x8, 0x1e00, 0, 420), CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VVIB_SEL, vvib_val_tbl, - 0x1, 0xc, 2, 0x1, 0, 500), + 0x1, 0xc, 0x1, 0, 500), CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VUSB_SEL, vusb_val_tbl, - 0x11c, 0x40, 6, 0xc, 0, 0), + 0x11c, 0x40, 0xc, 0, 0), CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4, CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl, - 0x16, 0x1, 0, 0x4, 0, 0), + 0x16, 0x1, 0x4, 0, 0), { /* 
sentinel */ }, }; -static struct cpcap_regulator xoom_regulators[] = { +static const struct cpcap_regulator xoom_regulators[] = { CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW1_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW2_SEL, sw2_sw4_val_tbl, - 0xf00, 0x7f, 0, 0x800, 0, 120), + 0xf00, 0x7f, 0x800, 0, 120), CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW3_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW4_SEL, sw2_sw4_val_tbl, - 0xf00, 0x7f, 0, 0x900, 0, 100), + 0xf00, 0x7f, 0x900, 0, 100), CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW5_SEL, sw5_val_tbl, - 0x2a, 0, 0, 0x22, 0, 0), + 0x2a, 0, 0x22, 0, 0), CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW6_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VCAM_SEL, vcam_val_tbl, - 0x87, 0x30, 4, 0x7, 0, 420), + 0x87, 0x30, 0x7, 0, 420), CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VCSI_SEL, vcsi_val_tbl, - 0x47, 0x10, 4, 0x7, 0, 350), + 0x47, 0x10, 0x7, 0, 350), CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VDAC_SEL, vdac_val_tbl, - 0x87, 0x30, 4, 0x3, 0, 420), + 0x87, 0x30, 0x3, 0, 420), CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VDIG_SEL, vdig_val_tbl, - 0x87, 0x30, 4, 0x5, 0, 420), + 0x87, 0x30, 0x5, 0, 420), CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl, - 0x80, 0xf, 0, 0x80, 0, 420), + 0x80, 0xf, 0x80, 0, 420), CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl, - 0x17, 0, 0, 0x2, 0, 0), + 0x17, 0, 0x2, 0, 0), CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl, - 0x87, 0x38, 3, 0x2, 0, 420), + 0x87, 0x38, 0x2, 0, 420), CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VPLL_SEL, vpll_val_tbl, - 0x43, 0x18, 3, 0x1, 0, 420), + 0x43, 0x18, 0x1, 0, 420), CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF1_SEL, vrf1_val_tbl, - 0xac, 0x2, 1, 0xc, 0, 10), + 0xac, 0x2, 0xc, 0, 10), CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF2_SEL, vrf2_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 10), + 0x23, 0x8, 0x3, 0, 10), CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 420), + 0x23, 0x8, 0x3, 0, 420), CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl, - 0x47, 0x10, 4, 0x5, 0, 420), + 0x47, 0x10, 0x5, 0, 420), CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl, - 0x20c, 0xc0, 6, 0x8, 0, 420), + 0x20c, 0xc0, 0x8, 0, 420), CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsim_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 420), + 0x23, 0x8, 0x3, 0, 420), CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsimcard_val_tbl, - 0x1e80, 0x8, 3, 0x1e00, 0, 420), + 0x1e80, 0x8, 0x1e00, 0, 420), CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VVIB_SEL, vvib_val_tbl, - 0x1, 0xc, 2, 0, 0x1, 500), + 0x1, 0xc, 0, 0x1, 500), CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VUSB_SEL, vusb_val_tbl, - 0x11c, 0x40, 6, 0xc, 0, 0), + 0x11c, 0x40, 0xc, 0, 0), CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4, CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl, - 0x16, 0x1, 0, 0x4, 0, 0), + 0x16, 0x1, 0x4, 0, 0), { 
/* sentinel */ }, }; diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index 207cb3859dcc..cefa3558236d 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -290,10 +290,10 @@ static const struct regulator_ops da9052_ldo_ops = { .disable = regulator_disable_regmap, }; -#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \ +#define DA9052_LDO(_id, _name, step, min, max, sbits, ebits, abits) \ {\ .reg_desc = {\ - .name = #_id,\ + .name = #_name,\ .ops = &da9052_ldo_ops,\ .type = REGULATOR_VOLTAGE,\ .id = DA9052_ID_##_id,\ @@ -310,10 +310,10 @@ static const struct regulator_ops da9052_ldo_ops = { .activate_bit = (abits),\ } -#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \ +#define DA9052_DCDC(_id, _name, step, min, max, sbits, ebits, abits) \ {\ .reg_desc = {\ - .name = #_id,\ + .name = #_name,\ .ops = &da9052_dcdc_ops,\ .type = REGULATOR_VOLTAGE,\ .id = DA9052_ID_##_id,\ @@ -331,37 +331,37 @@ static const struct regulator_ops da9052_ldo_ops = { } static struct da9052_regulator_info da9052_regulator_info[] = { - DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), - DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), - DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), - DA9052_DCDC(BUCK4, 50, 1800, 3600, 5, 6, 0), - DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0), - DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), - DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), - DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0), - DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0), - DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0), + DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), + DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), + DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), + DA9052_DCDC(BUCK4, buck4, 50, 1800, 3600, 5, 6, 0), + DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0), + DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), + DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), + DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0), + DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0), + DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0), }; static struct da9052_regulator_info da9053_regulator_info[] = { - DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), - DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), - DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), - DA9052_DCDC(BUCK4, 25, 950, 2525, 6, 6, 0), - DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0), - DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), - DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), - DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0), - DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0), - DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0), + DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, 
DA9052_SUPPLY_VBCOREGO), + DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), + DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), + DA9052_DCDC(BUCK4, buck4, 25, 950, 2525, 6, 6, 0), + DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0), + DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), + DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), + DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0), + DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0), + DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0), }; static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id, diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index 588c3d2445cf..3c6fac793658 100644 --- a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c @@ -48,7 +48,9 @@ #define DA9055_ID_LDO6 7 /* DA9055 BUCK current limit */ -static const int da9055_current_limits[] = { 500000, 600000, 700000, 800000 }; +static const unsigned int da9055_current_limits[] = { + 500000, 600000, 700000, 800000 +}; struct da9055_conf_reg { int reg; @@ -169,39 +171,6 @@ static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) val << volt.sl_shift); } -static int da9055_buck_get_current_limit(struct regulator_dev *rdev) -{ - struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; - int ret; - - ret = da9055_reg_read(regulator->da9055, DA9055_REG_BUCK_LIM); - if (ret < 0) - return ret; - - ret &= info->mode.mask; - return da9055_current_limits[ret >> info->mode.shift]; -} - -static int da9055_buck_set_current_limit(struct regulator_dev *rdev, int min_uA, - int max_uA) -{ - struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; - int i; - - for (i = ARRAY_SIZE(da9055_current_limits) - 1; i >= 0; i--) { - if ((min_uA <= da9055_current_limits[i]) && - (da9055_current_limits[i] <= max_uA)) - return da9055_reg_update(regulator->da9055, - DA9055_REG_BUCK_LIM, - info->mode.mask, - i << info->mode.shift); - } - - return -EINVAL; -} - static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); @@ -329,8 +298,8 @@ static const struct regulator_ops da9055_buck_ops = { .get_mode = da9055_buck_get_mode, .set_mode = da9055_buck_set_mode, - .get_current_limit = da9055_buck_get_current_limit, - .set_current_limit = da9055_buck_set_current_limit, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, .get_voltage_sel = da9055_regulator_get_voltage_sel, .set_voltage_sel = da9055_regulator_set_voltage_sel, @@ -407,6 +376,10 @@ static const struct regulator_ops da9055_ldo_ops = { .uV_step = (step) * 1000,\ .linear_min_sel = (voffset),\ .owner = THIS_MODULE,\ + .curr_table = da9055_current_limits,\ + .n_current_limits = ARRAY_SIZE(da9055_current_limits),\ + .csel_reg = DA9055_REG_BUCK_LIM,\ + .csel_mask = (mbits),\ },\ .conf = {\ .reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \ @@ -457,7 +430,6 @@ static int da9055_gpio_init(struct da9055_regulator *regulator, int gpio_mux = pdata->gpio_ren[id]; config->ena_gpiod = pdata->ena_gpiods[id]; - 
config->ena_gpio_invert = 1; /* * GPI pin is muxed with regulator to control the diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c index 34a70d9dc450..b064d8a19d4c 100644 --- a/drivers/regulator/da9062-regulator.c +++ b/drivers/regulator/da9062-regulator.c @@ -126,7 +126,7 @@ static int da9062_set_current_limit(struct regulator_dev *rdev, const struct da9062_regulator_info *rinfo = regl->info; int n, tval; - for (n = 0; n < rinfo->n_current_limits; n++) { + for (n = rinfo->n_current_limits - 1; n >= 0; n--) { tval = rinfo->current_limits[n]; if (tval >= min_ua && tval <= max_ua) return regmap_field_write(regl->ilimit, n); @@ -992,7 +992,6 @@ static int da9062_regulator_probe(struct platform_device *pdev) struct regulator_config config = { }; const struct da9062_regulator_info *rinfo; int irq, n, ret; - size_t size; int max_regulators; switch (chip->chip_type) { @@ -1010,9 +1009,8 @@ static int da9062_regulator_probe(struct platform_device *pdev) } /* Allocate memory required by usable regulators */ - size = sizeof(struct da9062_regulators) + - max_regulators * sizeof(struct da9062_regulator); - regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + regulators = devm_kzalloc(&pdev->dev, struct_size(regulators, regulator, + max_regulators), GFP_KERNEL); if (!regulators) return -ENOMEM; @@ -1029,31 +1027,50 @@ static int da9062_regulator_probe(struct platform_device *pdev) regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; - if (regl->info->mode.reg) + if (regl->info->mode.reg) { regl->mode = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->mode); - if (regl->info->suspend.reg) + if (IS_ERR(regl->mode)) + return PTR_ERR(regl->mode); + } + + if (regl->info->suspend.reg) { regl->suspend = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->suspend); - if (regl->info->sleep.reg) + if (IS_ERR(regl->suspend)) + return PTR_ERR(regl->suspend); + } + + if (regl->info->sleep.reg) { regl->sleep = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->sleep); - if (regl->info->suspend_sleep.reg) + if (IS_ERR(regl->sleep)) + return PTR_ERR(regl->sleep); + } + + if (regl->info->suspend_sleep.reg) { regl->suspend_sleep = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->suspend_sleep); - if (regl->info->ilimit.reg) + if (IS_ERR(regl->suspend_sleep)) + return PTR_ERR(regl->suspend_sleep); + } + + if (regl->info->ilimit.reg) { regl->ilimit = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->ilimit); + if (IS_ERR(regl->ilimit)) + return PTR_ERR(regl->ilimit); + } /* Register regulator */ memset(&config, 0, sizeof(config)); diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 8cbcd2a3eb20..2b0c7a85306a 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -167,7 +167,7 @@ static int da9063_set_current_limit(struct regulator_dev *rdev, const struct da9063_regulator_info *rinfo = regl->info; int n, tval; - for (n = 0; n < rinfo->n_current_limits; n++) { + for (n = rinfo->n_current_limits - 1; n >= 0; n--) { tval = rinfo->current_limits[n]; if (tval >= min_uA && tval <= max_uA) return regmap_field_write(regl->ilimit, n); @@ -739,7 +739,6 @@ static int da9063_regulator_probe(struct platform_device *pdev) struct regulator_config config; bool bcores_merged, bmem_bio_merged; int id, irq, n, n_regulators, ret, val; - size_t size; regl_pdata = da9063_pdata ? 
da9063_pdata->regulators_pdata : NULL; @@ -784,9 +783,8 @@ static int da9063_regulator_probe(struct platform_device *pdev) n_regulators--; /* remove BMEM_BIO_MERGED */ /* Allocate memory required by usable regulators */ - size = sizeof(struct da9063_regulators) + - n_regulators * sizeof(struct da9063_regulator); - regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + regulators = devm_kzalloc(&pdev->dev, struct_size(regulators, + regulator, n_regulators), GFP_KERNEL); if (!regulators) return -ENOMEM; @@ -835,21 +833,40 @@ static int da9063_regulator_probe(struct platform_device *pdev) regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; - if (regl->info->mode.reg) + if (regl->info->mode.reg) { regl->mode = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->mode); - if (regl->info->suspend.reg) + if (IS_ERR(regl->mode)) + return PTR_ERR(regl->mode); + } + + if (regl->info->suspend.reg) { regl->suspend = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend); - if (regl->info->sleep.reg) + if (IS_ERR(regl->suspend)) + return PTR_ERR(regl->suspend); + } + + if (regl->info->sleep.reg) { regl->sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->sleep); - if (regl->info->suspend_sleep.reg) + if (IS_ERR(regl->sleep)) + return PTR_ERR(regl->sleep); + } + + if (regl->info->suspend_sleep.reg) { regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend_sleep); - if (regl->info->ilimit.reg) + if (IS_ERR(regl->suspend_sleep)) + return PTR_ERR(regl->suspend_sleep); + } + + if (regl->info->ilimit.reg) { regl->ilimit = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->ilimit); + if (IS_ERR(regl->ilimit)) + return PTR_ERR(regl->ilimit); + } /* Register regulator */ memset(&config, 0, sizeof(config)); diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index 84dba64ed11e..528303771723 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -41,10 +41,6 @@ static const struct regmap_config da9210_regmap_config = { .val_bits = 8, }; -static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA, - int max_uA); -static int da9210_get_current_limit(struct regulator_dev *rdev); - static const struct regulator_ops da9210_buck_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -52,8 +48,8 @@ static const struct regulator_ops da9210_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = da9210_set_current_limit, - .get_current_limit = da9210_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; /* Default limits measured in millivolts and milliamps */ @@ -62,7 +58,7 @@ static const struct regulator_ops da9210_buck_ops = { #define DA9210_STEP_MV 10 /* Current limits for buck (uA) indices corresponds with register values */ -static const int da9210_buck_limits[] = { +static const unsigned int da9210_buck_limits[] = { 1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000 }; @@ -80,47 +76,12 @@ static const struct regulator_desc da9210_reg = { .enable_reg = DA9210_REG_BUCK_CONT, .enable_mask = DA9210_BUCK_EN, .owner = THIS_MODULE, + .curr_table = da9210_buck_limits, + 
.n_current_limits = ARRAY_SIZE(da9210_buck_limits), + .csel_reg = DA9210_REG_BUCK_ILIM, + .csel_mask = DA9210_BUCK_ILIM_MASK, }; -static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA, - int max_uA) -{ - struct da9210 *chip = rdev_get_drvdata(rdev); - unsigned int sel; - int i; - - /* search for closest to maximum */ - for (i = ARRAY_SIZE(da9210_buck_limits)-1; i >= 0; i--) { - if (min_uA <= da9210_buck_limits[i] && - max_uA >= da9210_buck_limits[i]) { - sel = i; - sel = sel << DA9210_BUCK_ILIM_SHIFT; - return regmap_update_bits(chip->regmap, - DA9210_REG_BUCK_ILIM, - DA9210_BUCK_ILIM_MASK, sel); - } - } - - return -EINVAL; -} - -static int da9210_get_current_limit(struct regulator_dev *rdev) -{ - struct da9210 *chip = rdev_get_drvdata(rdev); - unsigned int data; - unsigned int sel; - int ret; - - ret = regmap_read(chip->regmap, DA9210_REG_BUCK_ILIM, &data); - if (ret < 0) - return ret; - - /* select one of 16 values: 0000 (1600mA) to 1111 (4600mA) */ - sel = (data & DA9210_BUCK_ILIM_MASK) >> DA9210_BUCK_ILIM_SHIFT; - - return da9210_buck_limits[sel]; -} - static irqreturn_t da9210_irq_handler(int irq, void *data) { struct da9210 *chip = data; diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index a3bc8037153e..771a06d1900d 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -40,7 +40,6 @@ /* VSEL bit definitions */ #define VSEL_BUCK_EN (1 << 7) #define VSEL_MODE (1 << 6) -#define VSEL_NSEL_MASK 0x3F /* Chip ID and Verison */ #define DIE_ID 0x0F /* ID1 */ #define DIE_REV 0x0F /* ID2 */ @@ -49,14 +48,26 @@ #define CTL_SLEW_MASK (0x7 << 4) #define CTL_SLEW_SHIFT 4 #define CTL_RESET (1 << 2) +#define CTL_MODE_VSEL0_MODE BIT(0) +#define CTL_MODE_VSEL1_MODE BIT(1) #define FAN53555_NVOLTAGES 64 /* Numbers of voltages */ +#define FAN53526_NVOLTAGES 128 enum fan53555_vendor { - FAN53555_VENDOR_FAIRCHILD = 0, + FAN53526_VENDOR_FAIRCHILD = 0, + FAN53555_VENDOR_FAIRCHILD, FAN53555_VENDOR_SILERGY, }; +enum { + FAN53526_CHIP_ID_01 = 1, +}; + +enum { + FAN53526_CHIP_REV_08 = 8, +}; + /* IC Type */ enum { FAN53555_CHIP_ID_00 = 0, @@ -94,8 +105,12 @@ struct fan53555_device_info { /* Voltage range and step(linear) */ unsigned int vsel_min; unsigned int vsel_step; + unsigned int vsel_count; /* Voltage slew rate limiting */ unsigned int slew_rate; + /* Mode */ + unsigned int mode_reg; + unsigned int mode_mask; /* Sleep voltage cache */ unsigned int sleep_vol_cache; }; @@ -111,7 +126,7 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV) if (ret < 0) return ret; ret = regmap_update_bits(di->regmap, di->sleep_reg, - VSEL_NSEL_MASK, ret); + di->desc.vsel_mask, ret); if (ret < 0) return ret; /* Cache the sleep voltage setting. 
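The da9052, da9055 and da9210 hunks above all follow the same conversion: the driver stops open-coding its current-limit callbacks and instead describes the limit table and register field in its regulator_desc, letting the new regmap helpers do the work. A minimal sketch of the resulting pattern follows; the foo_* identifiers and the register/mask values are placeholders, not taken from any of the drivers above:

    static const unsigned int foo_buck_limits[] = {
            500000, 600000, 700000, 800000,         /* uA; index N maps to field value N */
    };

    static const struct regulator_ops foo_buck_ops = {
            .set_current_limit = regulator_set_current_limit_regmap,
            .get_current_limit = regulator_get_current_limit_regmap,
            /* ... voltage and enable ops as before ... */
    };

    static const struct regulator_desc foo_buck_desc = {
            .name                   = "foo-buck",
            .ops                    = &foo_buck_ops,
            .type                   = REGULATOR_VOLTAGE,
            .owner                  = THIS_MODULE,
            .curr_table             = foo_buck_limits,
            .n_current_limits       = ARRAY_SIZE(foo_buck_limits),
            .csel_reg               = 0x2a,         /* hypothetical current-limit register */
            .csel_mask              = 0x03,         /* hypothetical limit field */
    };

As the helpers.c hunk further down shows, regulator_set_current_limit_regmap() picks the highest table entry that fits in [min_uA, max_uA] whether curr_table ascends or descends, and shifts the index into place with ffs(csel_mask) - 1, so drivers only supply the table and the field location.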
@@ -143,11 +158,11 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode) switch (mode) { case REGULATOR_MODE_FAST: - regmap_update_bits(di->regmap, di->vol_reg, - VSEL_MODE, VSEL_MODE); + regmap_update_bits(di->regmap, di->mode_reg, + di->mode_mask, di->mode_mask); break; case REGULATOR_MODE_NORMAL: - regmap_update_bits(di->regmap, di->vol_reg, VSEL_MODE, 0); + regmap_update_bits(di->regmap, di->vol_reg, di->mode_mask, 0); break; default: return -EINVAL; @@ -161,10 +176,10 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev) unsigned int val; int ret = 0; - ret = regmap_read(di->regmap, di->vol_reg, &val); + ret = regmap_read(di->regmap, di->mode_reg, &val); if (ret < 0) return ret; - if (val & VSEL_MODE) + if (val & di->mode_mask) return REGULATOR_MODE_FAST; else return REGULATOR_MODE_NORMAL; @@ -219,6 +234,34 @@ static const struct regulator_ops fan53555_regulator_ops = { .set_suspend_disable = fan53555_set_suspend_disable, }; +static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di) +{ + /* Init voltage range and step */ + switch (di->chip_id) { + case FAN53526_CHIP_ID_01: + switch (di->chip_rev) { + case FAN53526_CHIP_REV_08: + di->vsel_min = 600000; + di->vsel_step = 6250; + break; + default: + dev_err(di->dev, + "Chip ID %d with rev %d not supported!\n", + di->chip_id, di->chip_rev); + return -EINVAL; + } + break; + default: + dev_err(di->dev, + "Chip ID %d not supported!\n", di->chip_id); + return -EINVAL; + } + + di->vsel_count = FAN53526_NVOLTAGES; + + return 0; +} + static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) { /* Init voltage range and step */ @@ -257,6 +300,8 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) return -EINVAL; } + di->vsel_count = FAN53555_NVOLTAGES; + return 0; } @@ -274,6 +319,8 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) return -EINVAL; } + di->vsel_count = FAN53555_NVOLTAGES; + return 0; } @@ -302,7 +349,35 @@ static int fan53555_device_setup(struct fan53555_device_info *di, return -EINVAL; } + /* Setup mode control register */ + switch (di->vendor) { + case FAN53526_VENDOR_FAIRCHILD: + di->mode_reg = FAN53555_CONTROL; + + switch (pdata->sleep_vsel_id) { + case FAN53555_VSEL_ID_0: + di->mode_mask = CTL_MODE_VSEL1_MODE; + break; + case FAN53555_VSEL_ID_1: + di->mode_mask = CTL_MODE_VSEL0_MODE; + break; + } + break; + case FAN53555_VENDOR_FAIRCHILD: + case FAN53555_VENDOR_SILERGY: + di->mode_reg = di->vol_reg; + di->mode_mask = VSEL_MODE; + break; + default: + dev_err(di->dev, "vendor %d not supported!\n", di->vendor); + return -EINVAL; + } + + /* Setup voltage range */ switch (di->vendor) { + case FAN53526_VENDOR_FAIRCHILD: + ret = fan53526_voltages_setup_fairchild(di); + break; case FAN53555_VENDOR_FAIRCHILD: ret = fan53555_voltages_setup_fairchild(di); break; @@ -326,13 +401,13 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, rdesc->supply_name = "vin"; rdesc->ops = &fan53555_regulator_ops; rdesc->type = REGULATOR_VOLTAGE; - rdesc->n_voltages = FAN53555_NVOLTAGES; + rdesc->n_voltages = di->vsel_count; rdesc->enable_reg = di->vol_reg; rdesc->enable_mask = VSEL_BUCK_EN; rdesc->min_uV = di->vsel_min; rdesc->uV_step = di->vsel_step; rdesc->vsel_reg = di->vol_reg; - rdesc->vsel_mask = VSEL_NSEL_MASK; + rdesc->vsel_mask = di->vsel_count - 1; rdesc->owner = THIS_MODULE; di->rdev = devm_regulator_register(di->dev, &di->desc, config); @@ -368,6 +443,9 @@ static struct 
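In the fan53555_regulator_register() hunk above, replacing the fixed VSEL_NSEL_MASK with di->vsel_count - 1 works because both supported selector counts are powers of two, so the mask falls out of the count directly:

    FAN53555: vsel_count = 64  -> vsel_mask = 64 - 1  = 0x3f   (the removed VSEL_NSEL_MASK)
    FAN53526: vsel_count = 128 -> vsel_mask = 128 - 1 = 0x7f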
fan53555_platform_data *fan53555_parse_dt(struct device *dev, static const struct of_device_id fan53555_dt_ids[] = { { + .compatible = "fcs,fan53526", + .data = (void *)FAN53526_VENDOR_FAIRCHILD, + }, { .compatible = "fcs,fan53555", .data = (void *)FAN53555_VENDOR_FAIRCHILD }, { @@ -412,11 +490,13 @@ static int fan53555_regulator_probe(struct i2c_client *client, } else { /* if no ramp constraint set, get the pdata ramp_delay */ if (!di->regulator->constraints.ramp_delay) { - int slew_idx = (pdata->slew_rate & 0x7) - ? pdata->slew_rate : 0; + if (pdata->slew_rate >= ARRAY_SIZE(slew_rates)) { + dev_err(&client->dev, "Invalid slew_rate\n"); + return -EINVAL; + } di->regulator->constraints.ramp_delay - = slew_rates[slew_idx]; + = slew_rates[pdata->slew_rate]; } di->vendor = id->driver_data; @@ -467,6 +547,9 @@ static int fan53555_regulator_probe(struct i2c_client *client, static const struct i2c_device_id fan53555_id[] = { { + .name = "fan53526", + .driver_data = FAN53526_VENDOR_FAIRCHILD + }, { .name = "fan53555", .driver_data = FAN53555_VENDOR_FAIRCHILD }, { diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index 9abdb9130766..b5afc9db2c61 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -79,15 +79,6 @@ of_get_fixed_voltage_config(struct device *dev, of_property_read_u32(np, "startup-delay-us", &config->startup_delay); - /* - * FIXME: we pulled active low/high and open drain handling into - * gpiolib so it will be handled there. Delete this in the second - * step when we also remove the custom inversion handling for all - * legacy boardfiles. - */ - config->enable_high = 1; - config->gpio_is_open_drain = 0; - if (of_find_property(np, "vin-supply", NULL)) config->input_supply = "vin"; @@ -151,24 +142,14 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) drvdata->desc.fixed_uV = config->microvolts; - cfg.ena_gpio_invert = !config->enable_high; - if (config->enabled_at_boot) { - if (config->enable_high) - gflags = GPIOD_OUT_HIGH; - else - gflags = GPIOD_OUT_LOW; - } else { - if (config->enable_high) - gflags = GPIOD_OUT_LOW; - else - gflags = GPIOD_OUT_HIGH; - } - if (config->gpio_is_open_drain) { - if (gflags == GPIOD_OUT_HIGH) - gflags = GPIOD_OUT_HIGH_OPEN_DRAIN; - else - gflags = GPIOD_OUT_LOW_OPEN_DRAIN; - } + /* + * The signal will be inverted by the GPIO core if flagged so in the + * decriptor. 
+ */ + if (config->enabled_at_boot) + gflags = GPIOD_OUT_HIGH; + else + gflags = GPIOD_OUT_LOW; /* * Some fixed regulators share the enable line between two diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index b2f5ec4f658a..6157001df0a4 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -30,16 +30,15 @@ #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/gpio-regulator.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <linux/of.h> -#include <linux/of_gpio.h> struct gpio_regulator_data { struct regulator_desc desc; struct regulator_dev *dev; - struct gpio *gpios; + struct gpio_desc **gpiods; int nr_gpios; struct gpio_regulator_state *states; @@ -82,7 +81,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev, for (ptr = 0; ptr < data->nr_gpios; ptr++) { state = (target & (1 << ptr)) >> ptr; - gpio_set_value_cansleep(data->gpios[ptr].gpio, state); + gpiod_set_value_cansleep(data->gpiods[ptr], state); } data->state = target; @@ -119,7 +118,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev, for (ptr = 0; ptr < data->nr_gpios; ptr++) { state = (target & (1 << ptr)) >> ptr; - gpio_set_value_cansleep(data->gpios[ptr].gpio, state); + gpiod_set_value_cansleep(data->gpiods[ptr], state); } data->state = target; @@ -138,7 +137,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np, { struct gpio_regulator_config *config; const char *regtype; - int proplen, gpio, i; + int proplen, i; + int ngpios; int ret; config = devm_kzalloc(dev, @@ -153,59 +153,36 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np, config->supply_name = config->init_data->constraints.name; - if (of_property_read_bool(np, "enable-active-high")) - config->enable_high = true; - if (of_property_read_bool(np, "enable-at-boot")) config->enabled_at_boot = true; of_property_read_u32(np, "startup-delay-us", &config->startup_delay); - config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); - if (config->enable_gpio < 0 && config->enable_gpio != -ENOENT) - return ERR_PTR(config->enable_gpio); - - /* Fetch GPIOs. 
- optional property*/ - ret = of_gpio_count(np); - if ((ret < 0) && (ret != -ENOENT)) - return ERR_PTR(ret); - - if (ret > 0) { - config->nr_gpios = ret; - config->gpios = devm_kcalloc(dev, - config->nr_gpios, sizeof(struct gpio), - GFP_KERNEL); - if (!config->gpios) + /* Fetch GPIO init levels */ + ngpios = gpiod_count(dev, NULL); + if (ngpios > 0) { + config->gflags = devm_kzalloc(dev, + sizeof(enum gpiod_flags) + * ngpios, + GFP_KERNEL); + if (!config->gflags) return ERR_PTR(-ENOMEM); - proplen = of_property_count_u32_elems(np, "gpios-states"); - /* optional property */ - if (proplen < 0) - proplen = 0; + for (i = 0; i < ngpios; i++) { + u32 val; - if (proplen > 0 && proplen != config->nr_gpios) { - dev_warn(dev, "gpios <-> gpios-states mismatch\n"); - proplen = 0; - } + ret = of_property_read_u32_index(np, "gpios-states", i, + &val); - for (i = 0; i < config->nr_gpios; i++) { - gpio = of_get_named_gpio(np, "gpios", i); - if (gpio < 0) { - if (gpio != -ENOENT) - return ERR_PTR(gpio); - break; - } - config->gpios[i].gpio = gpio; - config->gpios[i].label = config->supply_name; - if (proplen > 0) { - of_property_read_u32_index(np, "gpios-states", - i, &ret); - if (ret) - config->gpios[i].flags = - GPIOF_OUT_INIT_HIGH; - } + /* Default to high per specification */ + if (ret) + config->gflags[i] = GPIOD_OUT_HIGH; + else + config->gflags[i] = + val ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; } } + config->ngpios = ngpios; /* Fetch states. */ proplen = of_property_count_u32_elems(np, "states"); @@ -251,59 +228,56 @@ static struct regulator_ops gpio_regulator_current_ops = { static int gpio_regulator_probe(struct platform_device *pdev) { - struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev); - struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct gpio_regulator_config *config = dev_get_platdata(dev); + struct device_node *np = dev->of_node; struct gpio_regulator_data *drvdata; struct regulator_config cfg = { }; - int ptr, ret, state; + enum gpiod_flags gflags; + int ptr, ret, state, i; - drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data), + drvdata = devm_kzalloc(dev, sizeof(struct gpio_regulator_data), GFP_KERNEL); if (drvdata == NULL) return -ENOMEM; if (np) { - config = of_get_gpio_regulator_config(&pdev->dev, np, + config = of_get_gpio_regulator_config(dev, np, &drvdata->desc); if (IS_ERR(config)) return PTR_ERR(config); } - drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); + drvdata->desc.name = devm_kstrdup(dev, config->supply_name, GFP_KERNEL); if (drvdata->desc.name == NULL) { - dev_err(&pdev->dev, "Failed to allocate supply name\n"); + dev_err(dev, "Failed to allocate supply name\n"); return -ENOMEM; } - if (config->nr_gpios != 0) { - drvdata->gpios = kmemdup(config->gpios, - config->nr_gpios * sizeof(struct gpio), - GFP_KERNEL); - if (drvdata->gpios == NULL) { - dev_err(&pdev->dev, "Failed to allocate gpio data\n"); - ret = -ENOMEM; - goto err_name; - } - - drvdata->nr_gpios = config->nr_gpios; - ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "Could not obtain regulator setting GPIOs: %d\n", - ret); - goto err_memgpio; - } + drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *), + GFP_KERNEL); + if (!drvdata->gpiods) + return -ENOMEM; + for (i = 0; i < config->ngpios; i++) { + drvdata->gpiods[i] = devm_gpiod_get_index(dev, + NULL, + i, + config->gflags[i]); + if (IS_ERR(drvdata->gpiods[i])) + return PTR_ERR(drvdata->gpiods[i]); 
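One thing worth flagging in the probe conversion above: the devm_kzalloc() call reserves room for a single struct gpio_desc pointer, while the loop that follows fills config->ngpios entries. Sizing the allocation by the GPIO count avoids writing past it; a minimal sketch reusing the hunk's own names:

    /* one slot per requested GPIO, zero-initialised */
    drvdata->gpiods = devm_kcalloc(dev, config->ngpios,
                                   sizeof(struct gpio_desc *), GFP_KERNEL);
    if (!drvdata->gpiods)
            return -ENOMEM;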
+ /* This is good to know */ + gpiod_set_consumer_name(drvdata->gpiods[i], drvdata->desc.name); } + drvdata->nr_gpios = config->ngpios; - drvdata->states = kmemdup(config->states, - config->nr_states * - sizeof(struct gpio_regulator_state), - GFP_KERNEL); + drvdata->states = devm_kmemdup(dev, + config->states, + config->nr_states * + sizeof(struct gpio_regulator_state), + GFP_KERNEL); if (drvdata->states == NULL) { - dev_err(&pdev->dev, "Failed to allocate state data\n"); - ret = -ENOMEM; - goto err_stategpio; + dev_err(dev, "Failed to allocate state data\n"); + return -ENOMEM; } drvdata->nr_states = config->nr_states; @@ -322,61 +296,46 @@ static int gpio_regulator_probe(struct platform_device *pdev) drvdata->desc.ops = &gpio_regulator_current_ops; break; default: - dev_err(&pdev->dev, "No regulator type set\n"); - ret = -EINVAL; - goto err_memstate; + dev_err(dev, "No regulator type set\n"); + return -EINVAL; } /* build initial state from gpio init data. */ state = 0; for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) { - if (config->gpios[ptr].flags & GPIOF_OUT_INIT_HIGH) + if (config->gflags[ptr] == GPIOD_OUT_HIGH) state |= (1 << ptr); } drvdata->state = state; - cfg.dev = &pdev->dev; + cfg.dev = dev; cfg.init_data = config->init_data; cfg.driver_data = drvdata; cfg.of_node = np; - if (gpio_is_valid(config->enable_gpio)) { - cfg.ena_gpio = config->enable_gpio; - cfg.ena_gpio_initialized = true; - } - cfg.ena_gpio_invert = !config->enable_high; - if (config->enabled_at_boot) { - if (config->enable_high) - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - else - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - } else { - if (config->enable_high) - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - else - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - } + /* + * The signal will be inverted by the GPIO core if flagged so in the + * decriptor. 
+ */ + if (config->enabled_at_boot) + gflags = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE; + else + gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE; + + cfg.ena_gpiod = gpiod_get_optional(dev, "enable", gflags); + if (IS_ERR(cfg.ena_gpiod)) + return PTR_ERR(cfg.ena_gpiod); drvdata->dev = regulator_register(&drvdata->desc, &cfg); if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); - dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); - goto err_memstate; + dev_err(dev, "Failed to register regulator: %d\n", ret); + return ret; } platform_set_drvdata(pdev, drvdata); return 0; - -err_memstate: - kfree(drvdata->states); -err_stategpio: - gpio_free_array(drvdata->gpios, drvdata->nr_gpios); -err_memgpio: - kfree(drvdata->gpios); -err_name: - kfree(drvdata->desc.name); - return ret; } static int gpio_regulator_remove(struct platform_device *pdev) @@ -385,13 +344,6 @@ static int gpio_regulator_remove(struct platform_device *pdev) regulator_unregister(drvdata->dev); - gpio_free_array(drvdata->gpios, drvdata->nr_gpios); - - kfree(drvdata->states); - kfree(drvdata->gpios); - - kfree(drvdata->desc.name); - return 0; } diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index 5686a1335bd3..32d3f0499e2d 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -594,28 +594,30 @@ int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev, EXPORT_SYMBOL_GPL(regulator_list_voltage_pickable_linear_range); /** - * regulator_list_voltage_linear_range - List voltages for linear ranges + * regulator_desc_list_voltage_linear_range - List voltages for linear ranges * - * @rdev: Regulator device + * @desc: Regulator desc for regulator which volatges are to be listed * @selector: Selector to convert into a voltage * * Regulators with a series of simple linear mappings between voltages - * and selectors can set linear_ranges in the regulator descriptor and - * then use this function as their list_voltage() operation, + * and selectors who have set linear_ranges in the regulator descriptor + * can use this function prior regulator registration to list voltages. + * This is useful when voltages need to be listed during device-tree + * parsing. 
*/ -int regulator_list_voltage_linear_range(struct regulator_dev *rdev, - unsigned int selector) +int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, + unsigned int selector) { const struct regulator_linear_range *range; int i; - if (!rdev->desc->n_linear_ranges) { - BUG_ON(!rdev->desc->n_linear_ranges); + if (!desc->n_linear_ranges) { + BUG_ON(!desc->n_linear_ranges); return -EINVAL; } - for (i = 0; i < rdev->desc->n_linear_ranges; i++) { - range = &rdev->desc->linear_ranges[i]; + for (i = 0; i < desc->n_linear_ranges; i++) { + range = &desc->linear_ranges[i]; if (!(selector >= range->min_sel && selector <= range->max_sel)) @@ -628,6 +630,23 @@ int regulator_list_voltage_linear_range(struct regulator_dev *rdev, return -EINVAL; } +EXPORT_SYMBOL_GPL(regulator_desc_list_voltage_linear_range); + +/** + * regulator_list_voltage_linear_range - List voltages for linear ranges + * + * @rdev: Regulator device + * @selector: Selector to convert into a voltage + * + * Regulators with a series of simple linear mappings between voltages + * and selectors can set linear_ranges in the regulator descriptor and + * then use this function as their list_voltage() operation, + */ +int regulator_list_voltage_linear_range(struct regulator_dev *rdev, + unsigned int selector) +{ + return regulator_desc_list_voltage_linear_range(rdev->desc, selector); +} EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range); /** @@ -761,3 +780,89 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, rdev->desc->active_discharge_mask, val); } EXPORT_SYMBOL_GPL(regulator_set_active_discharge_regmap); + +/** + * regulator_set_current_limit_regmap - set_current_limit for regmap users + * + * @rdev: regulator to operate on + * @min_uA: Lower bound for current limit + * @max_uA: Upper bound for current limit + * + * Regulators that use regmap for their register I/O can set curr_table, + * csel_reg and csel_mask fields in their descriptor and then use this + * as their set_current_limit operation, saving some code. + */ +int regulator_set_current_limit_regmap(struct regulator_dev *rdev, + int min_uA, int max_uA) +{ + unsigned int n_currents = rdev->desc->n_current_limits; + int i, sel = -1; + + if (n_currents == 0) + return -EINVAL; + + if (rdev->desc->curr_table) { + const unsigned int *curr_table = rdev->desc->curr_table; + bool ascend = curr_table[n_currents - 1] > curr_table[0]; + + /* search for closest to maximum */ + if (ascend) { + for (i = n_currents - 1; i >= 0; i--) { + if (min_uA <= curr_table[i] && + curr_table[i] <= max_uA) { + sel = i; + break; + } + } + } else { + for (i = 0; i < n_currents; i++) { + if (min_uA <= curr_table[i] && + curr_table[i] <= max_uA) { + sel = i; + break; + } + } + } + } + + if (sel < 0) + return -EINVAL; + + sel <<= ffs(rdev->desc->csel_mask) - 1; + + return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg, + rdev->desc->csel_mask, sel); +} +EXPORT_SYMBOL_GPL(regulator_set_current_limit_regmap); + +/** + * regulator_get_current_limit_regmap - get_current_limit for regmap users + * + * @rdev: regulator to operate on + * + * Regulators that use regmap for their register I/O can set the + * csel_reg and csel_mask fields in their descriptor and then use this + * as their get_current_limit operation, saving some code. 
+ */ +int regulator_get_current_limit_regmap(struct regulator_dev *rdev) +{ + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val); + if (ret != 0) + return ret; + + val &= rdev->desc->csel_mask; + val >>= ffs(rdev->desc->csel_mask) - 1; + + if (rdev->desc->curr_table) { + if (val >= rdev->desc->n_current_limits) + return -EINVAL; + + return rdev->desc->curr_table[val]; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(regulator_get_current_limit_regmap); diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c index 36ae54b53814..bba24a6fdb1e 100644 --- a/drivers/regulator/hi655x-regulator.c +++ b/drivers/regulator/hi655x-regulator.c @@ -28,7 +28,6 @@ struct hi655x_regulator { unsigned int disable_reg; unsigned int status_reg; - unsigned int ctrl_regs; unsigned int ctrl_mask; struct regulator_desc rdesc; }; diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index 4abd8e9c81e5..6f28bba81d13 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c @@ -31,7 +31,6 @@ /* PMIC details */ struct isl_pmic { struct i2c_client *client; - struct regulator_dev *rdev[3]; struct mutex mtx; }; @@ -66,14 +65,14 @@ static int isl6271a_set_voltage_sel(struct regulator_dev *dev, return err; } -static struct regulator_ops isl_core_ops = { +static const struct regulator_ops isl_core_ops = { .get_voltage_sel = isl6271a_get_voltage_sel, .set_voltage_sel = isl6271a_set_voltage_sel, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, }; -static struct regulator_ops isl_fixed_ops = { +static const struct regulator_ops isl_fixed_ops = { .list_voltage = regulator_list_voltage_linear, }; @@ -109,6 +108,7 @@ static const struct regulator_desc isl_rd[] = { static int isl6271a_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { + struct regulator_dev *rdev; struct regulator_config config = { }; struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct isl_pmic *pmic; @@ -133,11 +133,10 @@ static int isl6271a_probe(struct i2c_client *i2c, config.init_data = NULL; config.driver_data = pmic; - pmic->rdev[i] = devm_regulator_register(&i2c->dev, &isl_rd[i], - &config); - if (IS_ERR(pmic->rdev[i])) { + rdev = devm_regulator_register(&i2c->dev, &isl_rd[i], &config); + if (IS_ERR(rdev)) { dev_err(&i2c->dev, "failed to register %s\n", id->name); - return PTR_ERR(pmic->rdev[i]); + return PTR_ERR(rdev); } } diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c index 8c0e8419c43f..c876e161052a 100644 --- a/drivers/regulator/lm363x-regulator.c +++ b/drivers/regulator/lm363x-regulator.c @@ -258,6 +258,9 @@ static int lm363x_regulator_probe(struct platform_device *pdev) * Register update is required if the pin is used. 
*/ gpiod = lm363x_regulator_of_get_enable_gpio(dev, id); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + if (gpiod) { cfg.ena_gpiod = gpiod; @@ -265,8 +268,7 @@ static int lm363x_regulator_probe(struct platform_device *pdev) LM3632_EXT_EN_MASK, LM3632_EXT_EN_MASK); if (ret) { - if (gpiod) - gpiod_put(gpiod); + gpiod_put(gpiod); dev_err(dev, "External pin err: %d\n", ret); return ret; } diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c index 5a89e6d4b9a6..ff97cc50f2eb 100644 --- a/drivers/regulator/lochnagar-regulator.c +++ b/drivers/regulator/lochnagar-regulator.c @@ -194,7 +194,7 @@ static const struct regulator_desc lochnagar_regulators[] = { .name = "VDDCORE", .supply_name = "SYSVDD", .type = REGULATOR_VOLTAGE, - .n_voltages = 57, + .n_voltages = 66, .ops = &lochnagar_vddcore_ops, .id = LOCHNAGAR_VDDCORE, @@ -226,14 +226,15 @@ static const struct of_device_id lochnagar_of_match[] = { }, { .compatible = "cirrus,lochnagar2-mic2vdd", - .data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD], + .data = &lochnagar_regulators[LOCHNAGAR_MIC2VDD], }, { .compatible = "cirrus,lochnagar2-vddcore", .data = &lochnagar_regulators[LOCHNAGAR_VDDCORE], }, - {}, + {} }; +MODULE_DEVICE_TABLE(of, lochnagar_of_match); static int lochnagar_regulator_probe(struct platform_device *pdev) { diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 204b5c5270e0..9e45112658ba 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -159,7 +159,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev, selector << LDO_VOL_CONTR_SHIFT(ldo)); } -static struct regulator_ops lp3971_ldo_ops = { +static const struct regulator_ops lp3971_ldo_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3971_ldo_is_enabled, @@ -233,7 +233,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev, 0 << BUCK_VOL_CHANGE_SHIFT(buck)); } -static struct regulator_ops lp3971_dcdc_ops = { +static const struct regulator_ops lp3971_dcdc_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3971_dcdc_is_enabled, diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c index ff0c275f902e..fb098198b688 100644 --- a/drivers/regulator/lp3972.c +++ b/drivers/regulator/lp3972.c @@ -305,7 +305,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev, return ret; } -static struct regulator_ops lp3972_ldo_ops = { +static const struct regulator_ops lp3972_ldo_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3972_ldo_is_enabled, @@ -386,7 +386,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev, LP3972_VOL_CHANGE_FLAG_MASK, 0); } -static struct regulator_ops lp3972_dcdc_ops = { +static const struct regulator_ops lp3972_dcdc_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3972_dcdc_is_enabled, diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c index 38992112fd6e..ca95257ce252 100644 --- a/drivers/regulator/lp872x.c +++ b/drivers/regulator/lp872x.c @@ -353,64 +353,6 @@ static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev) return val & LP872X_VOUT_M; } -static int lp8725_buck_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = 
rdev_get_id(rdev); - int i; - u8 addr; - - switch (buck) { - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT2; - break; - default: - return -EINVAL; - } - - for (i = ARRAY_SIZE(lp8725_buck_uA) - 1; i >= 0; i--) { - if (lp8725_buck_uA[i] >= min_uA && - lp8725_buck_uA[i] <= max_uA) - return lp872x_update_bits(lp, addr, - LP8725_BUCK_CL_M, - i << LP8725_BUCK_CL_S); - } - - return -EINVAL; -} - -static int lp8725_buck_get_current_limit(struct regulator_dev *rdev) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, val; - int ret; - - switch (buck) { - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT2; - break; - default: - return -EINVAL; - } - - ret = lp872x_read_byte(lp, addr, &val); - if (ret) - return ret; - - val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S; - - return (val < ARRAY_SIZE(lp8725_buck_uA)) ? - lp8725_buck_uA[val] : -EINVAL; -} - static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct lp872x *lp = rdev_get_drvdata(rdev); @@ -478,7 +420,7 @@ static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev) return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; } -static struct regulator_ops lp872x_ldo_ops = { +static const struct regulator_ops lp872x_ldo_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = regulator_set_voltage_sel_regmap, @@ -489,7 +431,7 @@ static struct regulator_ops lp872x_ldo_ops = { .enable_time = lp872x_regulator_enable_time, }; -static struct regulator_ops lp8720_buck_ops = { +static const struct regulator_ops lp8720_buck_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = lp872x_buck_set_voltage_sel, @@ -502,7 +444,7 @@ static struct regulator_ops lp8720_buck_ops = { .get_mode = lp872x_buck_get_mode, }; -static struct regulator_ops lp8725_buck_ops = { +static const struct regulator_ops lp8725_buck_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = lp872x_buck_set_voltage_sel, @@ -513,11 +455,11 @@ static struct regulator_ops lp8725_buck_ops = { .enable_time = lp872x_regulator_enable_time, .set_mode = lp872x_buck_set_mode, .get_mode = lp872x_buck_get_mode, - .set_current_limit = lp8725_buck_set_current_limit, - .get_current_limit = lp8725_buck_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; -static struct regulator_desc lp8720_regulator_desc[] = { +static const struct regulator_desc lp8720_regulator_desc[] = { { .name = "ldo1", .of_match = of_match_ptr("ldo1"), @@ -602,7 +544,7 @@ static struct regulator_desc lp8720_regulator_desc[] = { }, }; -static struct regulator_desc lp8725_regulator_desc[] = { +static const struct regulator_desc lp8725_regulator_desc[] = { { .name = "ldo1", .of_match = of_match_ptr("ldo1"), @@ -712,6 +654,10 @@ static struct regulator_desc lp8725_regulator_desc[] = { .owner = THIS_MODULE, .enable_reg = LP872X_GENERAL_CFG, .enable_mask = LP8725_BUCK1_EN_M, + .curr_table = lp8725_buck_uA, + .n_current_limits = ARRAY_SIZE(lp8725_buck_uA), + .csel_reg = LP8725_BUCK1_VOUT2, + .csel_mask = LP8725_BUCK_CL_M, }, { .name = "buck2", @@ -724,6 +670,10 @@ static struct regulator_desc lp8725_regulator_desc[] = { .owner = 
THIS_MODULE, .enable_reg = LP872X_GENERAL_CFG, .enable_mask = LP8725_BUCK2_EN_M, + .curr_table = lp8725_buck_uA, + .n_current_limits = ARRAY_SIZE(lp8725_buck_uA), + .csel_reg = LP8725_BUCK2_VOUT2, + .csel_mask = LP8725_BUCK_CL_M, }, }; @@ -820,7 +770,7 @@ static struct regulator_init_data static int lp872x_regulator_register(struct lp872x *lp) { - struct regulator_desc *desc; + const struct regulator_desc *desc; struct regulator_config cfg = { }; struct regulator_dev *rdev; int i; diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c index 70e3df653381..b55de293ca7a 100644 --- a/drivers/regulator/lp873x-regulator.c +++ b/drivers/regulator/lp873x-regulator.c @@ -39,6 +39,10 @@ .ramp_delay = _delay, \ .linear_ranges = _lr, \ .n_linear_ranges = ARRAY_SIZE(_lr), \ + .curr_table = lp873x_buck_uA, \ + .n_current_limits = ARRAY_SIZE(lp873x_buck_uA), \ + .csel_reg = (_cr), \ + .csel_mask = LP873X_BUCK0_CTRL_2_BUCK0_ILIM,\ }, \ .ctrl2_reg = _cr, \ } @@ -61,7 +65,7 @@ static const struct regulator_linear_range ldo0_ldo1_ranges[] = { REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000), }; -static unsigned int lp873x_buck_ramp_delay[] = { +static const unsigned int lp873x_buck_ramp_delay[] = { 30000, 15000, 10000, 7500, 3800, 1900, 940, 470 }; @@ -108,45 +112,8 @@ static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev, return 0; } -static int lp873x_buck_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - int id = rdev_get_id(rdev); - struct lp873x *lp873 = rdev_get_drvdata(rdev); - int i; - - for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) { - if (lp873x_buck_uA[i] >= min_uA && - lp873x_buck_uA[i] <= max_uA) - return regmap_update_bits(lp873->regmap, - regulators[id].ctrl2_reg, - LP873X_BUCK0_CTRL_2_BUCK0_ILIM, - i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM)); - } - - return -EINVAL; -} - -static int lp873x_buck_get_current_limit(struct regulator_dev *rdev) -{ - int id = rdev_get_id(rdev); - struct lp873x *lp873 = rdev_get_drvdata(rdev); - int ret; - unsigned int val; - - ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val); - if (ret) - return ret; - - val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >> - __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM); - - return (val < ARRAY_SIZE(lp873x_buck_uA)) ? 
- lp873x_buck_uA[val] : -EINVAL; -} - /* Operations permitted on BUCK0, BUCK1 */ -static struct regulator_ops lp873x_buck01_ops = { +static const struct regulator_ops lp873x_buck01_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -156,12 +123,12 @@ static struct regulator_ops lp873x_buck01_ops = { .map_voltage = regulator_map_voltage_linear_range, .set_voltage_time_sel = regulator_set_voltage_time_sel, .set_ramp_delay = lp873x_buck_set_ramp_delay, - .set_current_limit = lp873x_buck_set_current_limit, - .get_current_limit = lp873x_buck_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; /* Operations permitted on LDO0 and LDO1 */ -static struct regulator_ops lp873x_ldo01_ops = { +static const struct regulator_ops lp873x_ldo01_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c index 244822bb63cd..14fd38807134 100644 --- a/drivers/regulator/lp8755.c +++ b/drivers/regulator/lp8755.c @@ -315,7 +315,7 @@ out_i2c_error: .vsel_mask = LP8755_BUCK_VOUT_M,\ } -static struct regulator_desc lp8755_regulators[] = { +static const struct regulator_desc lp8755_regulators[] = { lp8755_buck_desc(0), lp8755_buck_desc(1), lp8755_buck_desc(2), @@ -386,7 +386,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data) if (ret < 0) goto err_i2c; - /* send OCP event to all regualtor devices */ + /* send OCP event to all regulator devices */ if ((flag1 & 0x01) && (pchip->irqmask & 0x01)) for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++) if (pchip->rdev[icnt] != NULL) @@ -394,7 +394,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data) LP8755_EVENT_OCP, NULL); - /* send OVP event to all regualtor devices */ + /* send OVP event to all regulator devices */ if ((flag1 & 0x02) && (pchip->irqmask & 0x02)) for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++) if (pchip->rdev[icnt] != NULL) diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c index c192357d1dea..4ed41731a5b1 100644 --- a/drivers/regulator/lp87565-regulator.c +++ b/drivers/regulator/lp87565-regulator.c @@ -51,7 +51,7 @@ static const struct regulator_linear_range buck0_1_2_3_ranges[] = { REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000), }; -static unsigned int lp87565_buck_ramp_delay[] = { +static const unsigned int lp87565_buck_ramp_delay[] = { 30000, 15000, 10000, 7500, 3800, 1900, 940, 470 }; @@ -140,7 +140,7 @@ static int lp87565_buck_get_current_limit(struct regulator_dev *rdev) } /* Operations permitted on BUCK0, BUCK1 */ -static struct regulator_ops lp87565_buck_ops = { +static const struct regulator_ops lp87565_buck_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c index ec46290b647e..a7d30550bb5f 100644 --- a/drivers/regulator/lp8788-buck.c +++ b/drivers/regulator/lp8788-buck.c @@ -95,12 +95,10 @@ struct lp8788_buck { void *dvs; }; -/* BUCK 1 ~ 4 voltage table */ -static const int lp8788_buck_vtbl[] = { - 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, - 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, - 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, - 1950000, 2000000, +/* BUCK 1 ~ 4 
voltage ranges */ +static const struct regulator_linear_range buck_volt_ranges[] = { + REGULATOR_LINEAR_RANGE(500000, 0, 0, 0), + REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000), }; static void lp8788_buck1_set_dvs(struct lp8788_buck *buck) @@ -345,8 +343,8 @@ static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev) } static const struct regulator_ops lp8788_buck12_ops = { - .list_voltage = regulator_list_voltage_table, - .map_voltage = regulator_map_voltage_ascend, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = lp8788_buck12_set_voltage_sel, .get_voltage_sel = lp8788_buck12_get_voltage_sel, .enable = regulator_enable_regmap, @@ -358,8 +356,8 @@ static const struct regulator_ops lp8788_buck12_ops = { }; static const struct regulator_ops lp8788_buck34_ops = { - .list_voltage = regulator_list_voltage_table, - .map_voltage = regulator_map_voltage_ascend, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .enable = regulator_enable_regmap, @@ -370,13 +368,14 @@ static const struct regulator_ops lp8788_buck34_ops = { .get_mode = lp8788_buck_get_mode, }; -static struct regulator_desc lp8788_buck_desc[] = { +static const struct regulator_desc lp8788_buck_desc[] = { { .name = "buck1", .id = BUCK1, .ops = &lp8788_buck12_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_BUCK, @@ -386,8 +385,9 @@ static struct regulator_desc lp8788_buck_desc[] = { .name = "buck2", .id = BUCK2, .ops = &lp8788_buck12_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_BUCK, @@ -397,8 +397,9 @@ static struct regulator_desc lp8788_buck_desc[] = { .name = "buck3", .id = BUCK3, .ops = &lp8788_buck34_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .vsel_reg = LP8788_BUCK3_VOUT, @@ -410,8 +411,9 @@ static struct regulator_desc lp8788_buck_desc[] = { .name = "buck4", .id = BUCK4, .ops = &lp8788_buck34_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .vsel_reg = LP8788_BUCK4_VOUT, diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c index 2ee22e7ea675..a2ef146e6b3a 100644 --- a/drivers/regulator/lp8788-ldo.c +++ b/drivers/regulator/lp8788-ldo.c @@ -186,7 +186,7 @@ static const struct regulator_ops lp8788_ldo_voltage_fixed_ops = { .enable_time = lp8788_ldo_enable_time, }; -static struct regulator_desc lp8788_dldo_desc[] = { +static const struct regulator_desc lp8788_dldo_desc[] = { { .name = "dldo1", .id = DLDO1, @@ -343,7 +343,7 @@ static struct regulator_desc lp8788_dldo_desc[] = { }, }; -static struct regulator_desc lp8788_aldo_desc[] = { +static const 
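The lp8788-buck change above swaps the 26-entry voltage table for two linear ranges; the mapping itself is unchanged, which a quick check confirms:

    sel 0        -> 500000 uV                          (old entry 0)
    sel 1 .. 25  -> 800000 + (sel - 1) * 50000 uV      (old entries 1 .. 25)
    sel 25       -> 800000 + 24 * 50000 = 2000000 uV   (old last entry)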
struct regulator_desc lp8788_aldo_desc[] = { { .name = "aldo1", .id = ALDO1, diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c index 71fd0f2a4b76..e6d66e492b85 100644 --- a/drivers/regulator/ltc3676.c +++ b/drivers/regulator/ltc3676.c @@ -241,61 +241,10 @@ static struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = { LTC3676_FIXED_REG(LDO4, ldo4, LDOB, 2), }; -static bool ltc3676_writeable_reg(struct device *dev, unsigned int reg) +static bool ltc3676_readable_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { - case LTC3676_IRQSTAT: - case LTC3676_BUCK1: - case LTC3676_BUCK2: - case LTC3676_BUCK3: - case LTC3676_BUCK4: - case LTC3676_LDOA: - case LTC3676_LDOB: - case LTC3676_SQD1: - case LTC3676_SQD2: - case LTC3676_CNTRL: - case LTC3676_DVB1A: - case LTC3676_DVB1B: - case LTC3676_DVB2A: - case LTC3676_DVB2B: - case LTC3676_DVB3A: - case LTC3676_DVB3B: - case LTC3676_DVB4A: - case LTC3676_DVB4B: - case LTC3676_MSKIRQ: - case LTC3676_MSKPG: - case LTC3676_USER: - case LTC3676_HRST: - case LTC3676_CLIRQ: - return true; - } - return false; -} - -static bool ltc3676_readable_reg(struct device *dev, unsigned int reg) -{ - switch (reg) { - case LTC3676_IRQSTAT: - case LTC3676_BUCK1: - case LTC3676_BUCK2: - case LTC3676_BUCK3: - case LTC3676_BUCK4: - case LTC3676_LDOA: - case LTC3676_LDOB: - case LTC3676_SQD1: - case LTC3676_SQD2: - case LTC3676_CNTRL: - case LTC3676_DVB1A: - case LTC3676_DVB1B: - case LTC3676_DVB2A: - case LTC3676_DVB2B: - case LTC3676_DVB3A: - case LTC3676_DVB3B: - case LTC3676_DVB4A: - case LTC3676_DVB4B: - case LTC3676_MSKIRQ: - case LTC3676_MSKPG: - case LTC3676_USER: + case LTC3676_BUCK1 ... LTC3676_IRQSTAT: case LTC3676_HRST: case LTC3676_CLIRQ: return true; @@ -306,9 +255,7 @@ static bool ltc3676_readable_reg(struct device *dev, unsigned int reg) static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { - case LTC3676_IRQSTAT: - case LTC3676_PGSTATL: - case LTC3676_PGSTATRT: + case LTC3676_IRQSTAT ... 
LTC3676_PGSTATRT: return true; } return false; @@ -317,8 +264,8 @@ static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg) static const struct regmap_config ltc3676_regmap_config = { .reg_bits = 8, .val_bits = 8, - .writeable_reg = ltc3676_writeable_reg, - .readable_reg = ltc3676_readable_reg, + .writeable_reg = ltc3676_readable_writeable_reg, + .readable_reg = ltc3676_readable_writeable_reg, .volatile_reg = ltc3676_volatile_reg, .max_register = LTC3676_CLIRQ, .use_single_read = true, @@ -442,5 +389,5 @@ static struct i2c_driver ltc3676_driver = { module_i2c_driver(ltc3676_driver); MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>"); -MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC1376"); +MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3676"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c index bc7f4751bf9c..85a88a9e4d42 100644 --- a/drivers/regulator/max14577-regulator.c +++ b/drivers/regulator/max14577-regulator.c @@ -324,4 +324,3 @@ module_exit(max14577_regulator_exit); MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:max14577-regulator"); diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c index b94e3a721721..1607ac673e44 100644 --- a/drivers/regulator/max77620-regulator.c +++ b/drivers/regulator/max77620-regulator.c @@ -1,7 +1,7 @@ /* * Maxim MAX77620 Regulator driver * - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Author: Mallikarjun Kasoju <mkasoju@nvidia.com> * Laxman Dewangan <ldewangan@nvidia.com> @@ -690,6 +690,7 @@ static const struct regulator_ops max77620_regulator_ops = { .active_discharge_mask = MAX77620_SD_CFG1_ADE_MASK, \ .active_discharge_reg = MAX77620_REG_##_id##_CFG, \ .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ }, \ } @@ -721,6 +722,7 @@ static const struct regulator_ops max77620_regulator_ops = { .active_discharge_mask = MAX77620_LDO_CFG2_ADE_MASK, \ .active_discharge_reg = MAX77620_REG_##_id##_CFG2, \ .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ }, \ } @@ -803,6 +805,14 @@ static int max77620_regulator_probe(struct platform_device *pdev) rdesc = &rinfo[id].desc; pmic->rinfo[id] = &max77620_regs_info[id]; pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL; + pmic->reg_pdata[id].active_fps_src = -1; + pmic->reg_pdata[id].active_fps_pd_slot = -1; + pmic->reg_pdata[id].active_fps_pu_slot = -1; + pmic->reg_pdata[id].suspend_fps_src = -1; + pmic->reg_pdata[id].suspend_fps_pd_slot = -1; + pmic->reg_pdata[id].suspend_fps_pu_slot = -1; + pmic->reg_pdata[id].power_ok = -1; + pmic->reg_pdata[id].ramp_rate_setting = -1; ret = max77620_read_slew_rate(pmic, id); if (ret < 0) diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c new file mode 100644 index 000000000000..31ebf34b01ec --- /dev/null +++ b/drivers/regulator/max77650-regulator.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2018 BayLibre SAS +// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> +// +// Regulator driver for MAXIM 77650/77651 charger/power-supply. 
+ +#include <linux/of.h> +#include <linux/mfd/max77650.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> + +#define MAX77650_REGULATOR_EN_CTRL_MASK GENMASK(3, 0) +#define MAX77650_REGULATOR_EN_CTRL_BITS(_reg) \ + ((_reg) & MAX77650_REGULATOR_EN_CTRL_MASK) +#define MAX77650_REGULATOR_ENABLED GENMASK(2, 1) +#define MAX77650_REGULATOR_DISABLED BIT(2) + +#define MAX77650_REGULATOR_V_LDO_MASK GENMASK(6, 0) +#define MAX77650_REGULATOR_V_SBB_MASK GENMASK(5, 0) + +#define MAX77650_REGULATOR_AD_MASK BIT(3) +#define MAX77650_REGULATOR_AD_DISABLED 0x00 +#define MAX77650_REGULATOR_AD_ENABLED BIT(3) + +#define MAX77650_REGULATOR_CURR_LIM_MASK GENMASK(7, 6) + +enum { + MAX77650_REGULATOR_ID_LDO = 0, + MAX77650_REGULATOR_ID_SBB0, + MAX77650_REGULATOR_ID_SBB1, + MAX77650_REGULATOR_ID_SBB2, + MAX77650_REGULATOR_NUM_REGULATORS, +}; + +struct max77650_regulator_desc { + struct regulator_desc desc; + unsigned int regA; + unsigned int regB; +}; + +static const u32 max77651_sbb1_regulator_volt_table[] = { + 2400000, 3200000, 4000000, 4800000, + 2450000, 3250000, 4050000, 4850000, + 2500000, 3300000, 4100000, 4900000, + 2550000, 3350000, 4150000, 4950000, + 2600000, 3400000, 4200000, 5000000, + 2650000, 3450000, 4250000, 5050000, + 2700000, 3500000, 4300000, 5100000, + 2750000, 3550000, 4350000, 5150000, + 2800000, 3600000, 4400000, 5200000, + 2850000, 3650000, 4450000, 5250000, + 2900000, 3700000, 4500000, 0, + 2950000, 3750000, 4550000, 0, + 3000000, 3800000, 4600000, 0, + 3050000, 3850000, 4650000, 0, + 3100000, 3900000, 4700000, 0, + 3150000, 3950000, 4750000, 0, +}; + +#define MAX77651_REGULATOR_SBB1_SEL_DEC(_val) \ + (((_val & 0x3c) >> 2) | ((_val & 0x03) << 4)) +#define MAX77651_REGULATOR_SBB1_SEL_ENC(_val) \ + (((_val & 0x30) >> 4) | ((_val & 0x0f) << 2)) + +#define MAX77650_REGULATOR_SBB1_SEL_DECR(_val) \ + do { \ + _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \ + _val--; \ + _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \ + } while (0) + +#define MAX77650_REGULATOR_SBB1_SEL_INCR(_val) \ + do { \ + _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \ + _val++; \ + _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \ + } while (0) + +static const unsigned int max77650_current_limit_table[] = { + 1000000, 866000, 707000, 500000, +}; + +static int max77650_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct max77650_regulator_desc *rdesc; + struct regmap *map; + int val, rv, en; + + rdesc = rdev_get_drvdata(rdev); + map = rdev_get_regmap(rdev); + + rv = regmap_read(map, rdesc->regB, &val); + if (rv) + return rv; + + en = MAX77650_REGULATOR_EN_CTRL_BITS(val); + + return en != MAX77650_REGULATOR_DISABLED; +} + +static int max77650_regulator_enable(struct regulator_dev *rdev) +{ + struct max77650_regulator_desc *rdesc; + struct regmap *map; + + rdesc = rdev_get_drvdata(rdev); + map = rdev_get_regmap(rdev); + + return regmap_update_bits(map, rdesc->regB, + MAX77650_REGULATOR_EN_CTRL_MASK, + MAX77650_REGULATOR_ENABLED); +} + +static int max77650_regulator_disable(struct regulator_dev *rdev) +{ + struct max77650_regulator_desc *rdesc; + struct regmap *map; + + rdesc = rdev_get_drvdata(rdev); + map = rdev_get_regmap(rdev); + + return regmap_update_bits(map, rdesc->regB, + MAX77650_REGULATOR_EN_CTRL_MASK, + MAX77650_REGULATOR_DISABLED); +} + +static int max77650_regulator_set_voltage_sel(struct regulator_dev *rdev, + unsigned int sel) +{ + int rv = 0, curr, diff; + bool ascending; + + /* + * If the regulator is disabled, we can program 
the desired + * voltage right away. + */ + if (!max77650_regulator_is_enabled(rdev)) + return regulator_set_voltage_sel_regmap(rdev, sel); + + /* + * Otherwise we need to manually ramp the output voltage up/down + * one step at a time. + */ + + curr = regulator_get_voltage_sel_regmap(rdev); + if (curr < 0) + return curr; + + diff = curr - sel; + if (diff == 0) + return 0; /* Already there. */ + else if (diff > 0) + ascending = false; + else + ascending = true; + + /* + * Make sure we'll get to the right voltage and break the loop even if + * the selector equals 0. + */ + for (ascending ? curr++ : curr--;; ascending ? curr++ : curr--) { + rv = regulator_set_voltage_sel_regmap(rdev, curr); + if (rv) + return rv; + + if (curr == sel) + break; + } + + return 0; +} + +/* + * Special case: non-linear voltage table for max77651 SBB1 - software + * must ensure the voltage is ramped in 50mV increments. + */ +static int max77651_regulator_sbb1_set_voltage_sel(struct regulator_dev *rdev, + unsigned int sel) +{ + int rv = 0, curr, vcurr, vdest, vdiff; + + /* + * If the regulator is disabled, we can program the desired + * voltage right away. + */ + if (!max77650_regulator_is_enabled(rdev)) + return regulator_set_voltage_sel_regmap(rdev, sel); + + curr = regulator_get_voltage_sel_regmap(rdev); + if (curr < 0) + return curr; + + if (curr == sel) + return 0; /* Already there. */ + + vcurr = max77651_sbb1_regulator_volt_table[curr]; + vdest = max77651_sbb1_regulator_volt_table[sel]; + vdiff = vcurr - vdest; + + for (;;) { + if (vdiff > 0) + MAX77650_REGULATOR_SBB1_SEL_DECR(curr); + else + MAX77650_REGULATOR_SBB1_SEL_INCR(curr); + + rv = regulator_set_voltage_sel_regmap(rdev, curr); + if (rv) + return rv; + + if (curr == sel) + break; + }; + + return 0; +} + +static const struct regulator_ops max77650_regulator_LDO_ops = { + .is_enabled = max77650_regulator_is_enabled, + .enable = max77650_regulator_enable, + .disable = max77650_regulator_disable, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = max77650_regulator_set_voltage_sel, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops max77650_regulator_SBB_ops = { + .is_enabled = max77650_regulator_is_enabled, + .enable = max77650_regulator_enable, + .disable = max77650_regulator_disable, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = max77650_regulator_set_voltage_sel, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +/* Special case for max77651 SBB1 - non-linear voltage mapping. 
*/ +static const struct regulator_ops max77651_SBB1_regulator_ops = { + .is_enabled = max77650_regulator_is_enabled, + .enable = max77650_regulator_enable, + .disable = max77650_regulator_disable, + .list_voltage = regulator_list_voltage_table, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = max77651_regulator_sbb1_set_voltage_sel, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static struct max77650_regulator_desc max77650_LDO_desc = { + .desc = { + .name = "ldo", + .of_match = of_match_ptr("ldo"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-ldo", + .id = MAX77650_REGULATOR_ID_LDO, + .ops = &max77650_regulator_LDO_ops, + .min_uV = 1350000, + .uV_step = 12500, + .n_voltages = 128, + .vsel_mask = MAX77650_REGULATOR_V_LDO_MASK, + .vsel_reg = MAX77650_REG_CNFG_LDO_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_LDO_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .regA = MAX77650_REG_CNFG_LDO_A, + .regB = MAX77650_REG_CNFG_LDO_B, +}; + +static struct max77650_regulator_desc max77650_SBB0_desc = { + .desc = { + .name = "sbb0", + .of_match = of_match_ptr("sbb0"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb0", + .id = MAX77650_REGULATOR_ID_SBB0, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 800000, + .uV_step = 25000, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB0_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB0_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB0_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB0_A, + .regB = MAX77650_REG_CNFG_SBB0_B, +}; + +static struct max77650_regulator_desc max77650_SBB1_desc = { + .desc = { + .name = "sbb1", + .of_match = of_match_ptr("sbb1"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb1", + .id = MAX77650_REGULATOR_ID_SBB1, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 800000, + .uV_step = 12500, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB1_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB1_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB1_A, + .regB = MAX77650_REG_CNFG_SBB1_B, +}; + +static struct max77650_regulator_desc max77651_SBB1_desc = { + .desc = { + .name = "sbb1", + .of_match = of_match_ptr("sbb1"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = 
"in-sbb1", + .id = MAX77650_REGULATOR_ID_SBB1, + .ops = &max77651_SBB1_regulator_ops, + .volt_table = max77651_sbb1_regulator_volt_table, + .n_voltages = ARRAY_SIZE(max77651_sbb1_regulator_volt_table), + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB1_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB1_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB1_A, + .regB = MAX77650_REG_CNFG_SBB1_B, +}; + +static struct max77650_regulator_desc max77650_SBB2_desc = { + .desc = { + .name = "sbb2", + .of_match = of_match_ptr("sbb2"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb0", + .id = MAX77650_REGULATOR_ID_SBB2, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 800000, + .uV_step = 50000, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB2_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB2_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB2_A, + .regB = MAX77650_REG_CNFG_SBB2_B, +}; + +static struct max77650_regulator_desc max77651_SBB2_desc = { + .desc = { + .name = "sbb2", + .of_match = of_match_ptr("sbb2"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb0", + .id = MAX77650_REGULATOR_ID_SBB2, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 2400000, + .uV_step = 50000, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB2_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB2_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB2_A, + .regB = MAX77650_REG_CNFG_SBB2_B, +}; + +static int max77650_regulator_probe(struct platform_device *pdev) +{ + struct max77650_regulator_desc **rdescs; + struct max77650_regulator_desc *rdesc; + struct regulator_config config = { }; + struct device *dev, *parent; + struct regulator_dev *rdev; + struct regmap *map; + unsigned int val; + int i, rv; + + dev = &pdev->dev; + parent = dev->parent; + + if (!dev->of_node) + dev->of_node = parent->of_node; + + rdescs = devm_kcalloc(dev, MAX77650_REGULATOR_NUM_REGULATORS, + sizeof(*rdescs), GFP_KERNEL); + if (!rdescs) + return -ENOMEM; + + map = dev_get_regmap(parent, NULL); + if (!map) + return -ENODEV; + + rv = regmap_read(map, MAX77650_REG_CID, &val); + if (rv) + 
return rv; + + rdescs[MAX77650_REGULATOR_ID_LDO] = &max77650_LDO_desc; + rdescs[MAX77650_REGULATOR_ID_SBB0] = &max77650_SBB0_desc; + + switch (MAX77650_CID_BITS(val)) { + case MAX77650_CID_77650A: + case MAX77650_CID_77650C: + rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77650_SBB1_desc; + rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77650_SBB2_desc; + break; + case MAX77650_CID_77651A: + case MAX77650_CID_77651B: + rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77651_SBB1_desc; + rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77651_SBB2_desc; + break; + default: + return -ENODEV; + } + + config.dev = parent; + + for (i = 0; i < MAX77650_REGULATOR_NUM_REGULATORS; i++) { + rdesc = rdescs[i]; + config.driver_data = rdesc; + + rdev = devm_regulator_register(dev, &rdesc->desc, &config); + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + } + + return 0; +} + +static struct platform_driver max77650_regulator_driver = { + .driver = { + .name = "max77650-regulator", + }, + .probe = max77650_regulator_probe, +}; +module_platform_driver(max77650_regulator_driver); + +MODULE_DESCRIPTION("MAXIM 77650/77651 regulator driver"); +MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c index c30cf5c9f2de..ea7b50397300 100644 --- a/drivers/regulator/max77802-regulator.c +++ b/drivers/regulator/max77802-regulator.c @@ -248,9 +248,9 @@ static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev, unsigned int ramp_value; if (id > MAX77802_BUCK4) { - dev_warn(&rdev->dev, - "%s: regulator: ramp delay not supported\n", - rdev->desc->name); + dev_warn(&rdev->dev, + "%s: regulator: ramp delay not supported\n", + rdev->desc->name); return -EINVAL; } ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit, diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index 8fd1adc9c9a9..ab558b26cd7c 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -226,69 +226,69 @@ static const unsigned int mc13783_pwgtdrv_val[] = { 5500000, }; -static struct regulator_ops mc13783_gpo_regulator_ops; +static const struct regulator_ops mc13783_gpo_regulator_ops; -#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages) \ - MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \ +#define MC13783_DEFINE(prefix, name, node, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13783_REG_, name, node, reg, vsel_reg, voltages, \ mc13xxx_regulator_ops) -#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages) \ - MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \ +#define MC13783_FIXED_DEFINE(prefix, name, node, reg, voltages) \ + MC13xxx_FIXED_DEFINE(MC13783_REG_, name, node, reg, voltages, \ mc13xxx_fixed_regulator_ops) -#define MC13783_GPO_DEFINE(prefix, name, reg, voltages) \ - MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \ +#define MC13783_GPO_DEFINE(prefix, name, node, reg, voltages) \ + MC13xxx_GPO_DEFINE(MC13783_REG_, name, node, reg, voltages, \ mc13783_gpo_regulator_ops) -#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \ - MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) -#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \ - MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) +#define MC13783_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages) \ + MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages) +#define MC13783_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages) \ 
+ MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages) static struct mc13xxx_regulator mc13783_regulators[] = { - MC13783_DEFINE_SW(SW1A, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val), - MC13783_DEFINE_SW(SW1B, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val), - MC13783_DEFINE_SW(SW2A, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val), - MC13783_DEFINE_SW(SW2B, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val), - MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), - - MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val), - MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val), - MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_SW(SW1A, sw1a, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val), + MC13783_DEFINE_SW(SW1B, sw1b, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val), + MC13783_DEFINE_SW(SW2A, sw2a, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val), + MC13783_DEFINE_SW(SW2B, sw2b, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val), + MC13783_DEFINE_SW(SW3, sw3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), + + MC13783_FIXED_DEFINE(REG, VAUDIO, vaudio, REGULATORMODE0, mc13783_vaudio_val), + MC13783_FIXED_DEFINE(REG, VIOHI, viohi, REGULATORMODE0, mc13783_viohi_val), + MC13783_DEFINE_REGU(VIOLO, violo, REGULATORMODE0, REGULATORSETTING0, mc13783_violo_val), - MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0, mc13783_vdig_val), - MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VGEN, vgen, REGULATORMODE0, REGULATORSETTING0, mc13783_vgen_val), - MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VRFDIG, vrfdig, REGULATORMODE0, REGULATORSETTING0, mc13783_vrfdig_val), - MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VRFREF, vrfref, REGULATORMODE0, REGULATORSETTING0, mc13783_vrfref_val), - MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VRFCP, vrfcp, REGULATORMODE0, REGULATORSETTING0, mc13783_vrfcp_val), - MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, + MC13783_DEFINE_REGU(VSIM, vsim, REGULATORMODE1, REGULATORSETTING0, mc13783_vsim_val), - MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, + MC13783_DEFINE_REGU(VESIM, vesim, REGULATORMODE1, REGULATORSETTING0, mc13783_vesim_val), - MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, + MC13783_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0, mc13783_vcam_val), - MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val), - MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, + MC13783_FIXED_DEFINE(REG, VRFBG, vrfbg, REGULATORMODE1, mc13783_vrfbg_val), + MC13783_DEFINE_REGU(VVIB, vvib, REGULATORMODE1, REGULATORSETTING1, mc13783_vvib_val), - MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VRF1, vrf1, REGULATORMODE1, REGULATORSETTING1, mc13783_vrf_val), - MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VRF2, vrf2, REGULATORMODE1, REGULATORSETTING1, mc13783_vrf_val), - MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VMMC1, vmmc1, REGULATORMODE1, REGULATORSETTING1, mc13783_vmmc_val), - MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VMMC2, vmmc2, REGULATORMODE1, REGULATORSETTING1, mc13783_vmmc_val), - MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, 
mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val), - MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val), + MC13783_GPO_DEFINE(REG, GPO1, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO2, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO3, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO4, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, PWGT1SPI, pwgt1spi, POWERMISC, mc13783_pwgtdrv_val), + MC13783_GPO_DEFINE(REG, PWGT2SPI, pwgt2spi, POWERMISC, mc13783_pwgtdrv_val), }; static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, @@ -380,7 +380,7 @@ static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev) return (val & mc13xxx_regulators[id].enable_bit) != 0; } -static struct regulator_ops mc13783_gpo_regulator_ops = { +static const struct regulator_ops mc13783_gpo_regulator_ops = { .enable = mc13783_gpo_regulator_enable, .disable = mc13783_gpo_regulator_disable, .is_enabled = mc13783_gpo_regulator_is_enabled, diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c index f3fba1cc1379..a731e826a037 100644 --- a/drivers/regulator/mc13892-regulator.c +++ b/drivers/regulator/mc13892-regulator.c @@ -242,61 +242,61 @@ static const unsigned int mc13892_pwgtdrv[] = { 5000000, }; -static struct regulator_ops mc13892_gpo_regulator_ops; -static struct regulator_ops mc13892_sw_regulator_ops; +static const struct regulator_ops mc13892_gpo_regulator_ops; +static const struct regulator_ops mc13892_sw_regulator_ops; -#define MC13892_FIXED_DEFINE(name, reg, voltages) \ - MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \ +#define MC13892_FIXED_DEFINE(name, node, reg, voltages) \ + MC13xxx_FIXED_DEFINE(MC13892_, name, node, reg, voltages, \ mc13xxx_fixed_regulator_ops) -#define MC13892_GPO_DEFINE(name, reg, voltages) \ - MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \ +#define MC13892_GPO_DEFINE(name, node, reg, voltages) \ + MC13xxx_GPO_DEFINE(MC13892_, name, node, reg, voltages, \ mc13892_gpo_regulator_ops) -#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \ - MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ +#define MC13892_SW_DEFINE(name, node, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \ mc13892_sw_regulator_ops) -#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \ - MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ +#define MC13892_DEFINE_REGU(name, node, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \ mc13xxx_regulator_ops) static struct mc13xxx_regulator mc13892_regulators[] = { - MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell), - MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1), - MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw), - MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw), - MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), - MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), - MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), - MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VCOINCELL, vcoincell, POWERCTL0, POWERCTL0, mc13892_vcoincell), + MC13892_SW_DEFINE(SW1, sw1, SWITCHERS0, SWITCHERS0, mc13892_sw1), + MC13892_SW_DEFINE(SW2, sw2, SWITCHERS1, 
SWITCHERS1, mc13892_sw), + MC13892_SW_DEFINE(SW3, sw3, SWITCHERS2, SWITCHERS2, mc13892_sw), + MC13892_SW_DEFINE(SW4, sw4, SWITCHERS3, SWITCHERS3, mc13892_sw), + MC13892_FIXED_DEFINE(SWBST, swbst, SWITCHERS5, mc13892_swbst), + MC13892_FIXED_DEFINE(VIOHI, viohi, REGULATORMODE0, mc13892_viohi), + MC13892_DEFINE_REGU(VPLL, vpll, REGULATORMODE0, REGULATORSETTING0, mc13892_vpll), - MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0, mc13892_vdig), - MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, + MC13892_DEFINE_REGU(VSD, vsd, REGULATORMODE1, REGULATORSETTING1, mc13892_vsd), - MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VUSB2, vusb2, REGULATORMODE0, REGULATORSETTING0, mc13892_vusb2), - MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, + MC13892_DEFINE_REGU(VVIDEO, vvideo, REGULATORMODE1, REGULATORSETTING1, mc13892_vvideo), - MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, + MC13892_DEFINE_REGU(VAUDIO, vaudio, REGULATORMODE1, REGULATORSETTING1, mc13892_vaudio), - MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, + MC13892_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0, mc13892_vcam), - MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VGEN1, vgen1, REGULATORMODE0, REGULATORSETTING0, mc13892_vgen1), - MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VGEN2, vgen2, REGULATORMODE0, REGULATORSETTING0, mc13892_vgen2), - MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, + MC13892_DEFINE_REGU(VGEN3, vgen3, REGULATORMODE1, REGULATORSETTING0, mc13892_vgen3), - MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), - MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv), - MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv), + MC13892_FIXED_DEFINE(VUSB, vusb, USB1, mc13892_vusb), + MC13892_GPO_DEFINE(GPO1, gpo1, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO2, gpo2, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO3, gpo3, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO4, gpo4, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(PWGT1SPI, pwgt1spi, POWERMISC, mc13892_pwgtdrv), + MC13892_GPO_DEFINE(PWGT2SPI, pwgt2spi, POWERMISC, mc13892_pwgtdrv), }; static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, @@ -387,7 +387,7 @@ static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev) } -static struct regulator_ops mc13892_gpo_regulator_ops = { +static const struct regulator_ops mc13892_gpo_regulator_ops = { .enable = mc13892_gpo_regulator_enable, .disable = mc13892_gpo_regulator_disable, .is_enabled = mc13892_gpo_regulator_is_enabled, @@ -479,7 +479,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, return ret; } -static struct regulator_ops mc13892_sw_regulator_ops = { +static const struct regulator_ops mc13892_sw_regulator_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = mc13892_sw_regulator_set_voltage_sel, diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index 2243138d8a58..8ff19150ca92 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ 
b/drivers/regulator/mc13xxx-regulator-core.c @@ -99,7 +99,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) return rdev->desc->volt_table[val]; } -struct regulator_ops mc13xxx_regulator_ops = { +const struct regulator_ops mc13xxx_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, @@ -127,7 +127,7 @@ int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, } EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage); -struct regulator_ops mc13xxx_fixed_regulator_ops = { +const struct regulator_ops mc13xxx_fixed_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h index 2ab9bfd93b4e..ba7eff1070bd 100644 --- a/drivers/regulator/mc13xxx.h +++ b/drivers/regulator/mc13xxx.h @@ -53,13 +53,13 @@ static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt( } #endif -extern struct regulator_ops mc13xxx_regulator_ops; -extern struct regulator_ops mc13xxx_fixed_regulator_ops; +extern const struct regulator_ops mc13xxx_regulator_ops; +extern const struct regulator_ops mc13xxx_fixed_regulator_ops; -#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \ +#define MC13xxx_DEFINE(prefix, _name, _node, _reg, _vsel_reg, _voltages, _ops) \ [prefix ## _name] = { \ .desc = { \ - .name = #_name, \ + .name = #_node, \ .n_voltages = ARRAY_SIZE(_voltages), \ .volt_table = _voltages, \ .ops = &_ops, \ @@ -74,10 +74,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\ } -#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \ +#define MC13xxx_FIXED_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \ [prefix ## _name] = { \ .desc = { \ - .name = #_name, \ + .name = #_node, \ .n_voltages = ARRAY_SIZE(_voltages), \ .volt_table = _voltages, \ .ops = &_ops, \ @@ -89,10 +89,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ } -#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \ +#define MC13xxx_GPO_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \ [prefix ## _name] = { \ .desc = { \ - .name = #_name, \ + .name = #_node, \ .n_voltages = ARRAY_SIZE(_voltages), \ .volt_table = _voltages, \ .ops = &_ops, \ @@ -104,9 +104,9 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ } -#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \ - MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops) -#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops) \ - MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops) +#define MC13xxx_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages, ops) \ + MC13xxx_DEFINE(SW, _name, _node, _reg, _vsel_reg, _voltages, ops) +#define MC13xxx_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages, ops) \ + MC13xxx_DEFINE(REGU, _name, _node, _reg, _vsel_reg, _voltages, ops) #endif diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c index 3479ae009b0b..3a8004abe044 100644 --- a/drivers/regulator/mcp16502.c +++ b/drivers/regulator/mcp16502.c @@ -17,6 +17,7 @@ #include <linux/regmap.h> #include <linux/regulator/driver.h> #include <linux/suspend.h> +#include <linux/gpio/consumer.h> #define VDD_LOW_SEL 0x0D #define VDD_HIGH_SEL 
0x3F @@ -546,7 +547,6 @@ static struct i2c_driver mcp16502_drv = { module_i2c_driver(mcp16502_drv); -MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MCP16502 PMIC driver"); MODULE_AUTHOR("Andrei Stefanescu andrei.stefanescu@microchip.com"); diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c index 0495716fd35f..01d69f43d2b0 100644 --- a/drivers/regulator/mt6311-regulator.c +++ b/drivers/regulator/mt6311-regulator.c @@ -38,13 +38,9 @@ static const struct regmap_config mt6311_regmap_config = { #define MT6311_MAX_UV 1393750 #define MT6311_STEP_UV 6250 -static const struct regulator_linear_range buck_volt_range[] = { - REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV), -}; - static const struct regulator_ops mt6311_buck_ops = { - .list_voltage = regulator_list_voltage_linear_range, - .map_voltage = regulator_map_voltage_linear_range, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, @@ -71,8 +67,6 @@ static const struct regulator_ops mt6311_ldo_ops = { .min_uV = MT6311_MIN_UV,\ .uV_step = MT6311_STEP_UV,\ .owner = THIS_MODULE,\ - .linear_ranges = buck_volt_range, \ - .n_linear_ranges = ARRAY_SIZE(buck_volt_range), \ .enable_reg = MT6311_VDVFS11_CON9,\ .enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\ .vsel_reg = MT6311_VDVFS11_CON12,\ diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index ffa5fc3724e4..7b6bf3536271 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -255,7 +255,7 @@ static void of_get_regulation_constraints(struct device_node *np, * @desc: regulator description * * Populates regulator_init_data structure by extracting data from device - * tree node, returns a pointer to the populated struture or NULL if memory + * tree node, returns a pointer to the populated structure or NULL if memory * alloc fails. 
*/ struct regulator_init_data *of_get_regulator_init_data(struct device *dev, @@ -547,7 +547,7 @@ bool of_check_coupling_data(struct regulator_dev *rdev) NULL); if (c_n_phandles != n_phandles) { - dev_err(&rdev->dev, "number of couped reg phandles mismatch\n"); + dev_err(&rdev->dev, "number of coupled reg phandles mismatch\n"); ret = false; goto clean; } diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index c2cc392a27d4..7fb9e8dd834e 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -382,7 +382,7 @@ static struct palmas_sleep_requestor_info tps65917_sleep_req_info[] = { EXTERNAL_REQUESTOR_TPS65917(LDO5, 2, 4), }; -static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500}; +static const unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500}; #define SMPS_CTRL_MODE_OFF 0x00 #define SMPS_CTRL_MODE_ON 0x01 diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c index a9446056435f..1600f9821891 100644 --- a/drivers/regulator/pv88060-regulator.c +++ b/drivers/regulator/pv88060-regulator.c @@ -53,10 +53,6 @@ enum { struct pv88060_regulator { struct regulator_desc desc; - /* Current limiting */ - unsigned n_current_limits; - const int *current_limits; - unsigned int limit_mask; unsigned int conf; /* buck configuration register */ }; @@ -75,7 +71,7 @@ static const struct regmap_config pv88060_regmap_config = { * Entry indexes corresponds to register values. */ -static const int pv88060_buck1_limits[] = { +static const unsigned int pv88060_buck1_limits[] = { 1496000, 2393000, 3291000, 4189000 }; @@ -128,40 +124,6 @@ static int pv88060_buck_set_mode(struct regulator_dev *rdev, PV88060_BUCK_MODE_MASK, val); } -static int pv88060_set_current_limit(struct regulator_dev *rdev, int min, - int max) -{ - struct pv88060_regulator *info = rdev_get_drvdata(rdev); - int i; - - /* search for closest to maximum */ - for (i = info->n_current_limits; i >= 0; i--) { - if (min <= info->current_limits[i] - && max >= info->current_limits[i]) { - return regmap_update_bits(rdev->regmap, - info->conf, - info->limit_mask, - i << PV88060_BUCK_ILIM_SHIFT); - } - } - - return -EINVAL; -} - -static int pv88060_get_current_limit(struct regulator_dev *rdev) -{ - struct pv88060_regulator *info = rdev_get_drvdata(rdev); - unsigned int data; - int ret; - - ret = regmap_read(rdev->regmap, info->conf, &data); - if (ret < 0) - return ret; - - data = (data & info->limit_mask) >> PV88060_BUCK_ILIM_SHIFT; - return info->current_limits[data]; -} - static const struct regulator_ops pv88060_buck_ops = { .get_mode = pv88060_buck_get_mode, .set_mode = pv88060_buck_set_mode, @@ -171,8 +133,8 @@ static const struct regulator_ops pv88060_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = pv88060_set_current_limit, - .get_current_limit = pv88060_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; static const struct regulator_ops pv88060_ldo_ops = { @@ -184,6 +146,12 @@ static const struct regulator_ops pv88060_ldo_ops = { .list_voltage = regulator_list_voltage_linear, }; +static const struct regulator_ops pv88060_sw_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + #define PV88060_BUCK(chip, 
regl_name, min, step, max, limits_array) \ {\ .desc = {\ @@ -201,10 +169,11 @@ static const struct regulator_ops pv88060_ldo_ops = { .enable_mask = PV88060_BUCK_EN, \ .vsel_reg = PV88060_REG_##regl_name##_CONF0,\ .vsel_mask = PV88060_VBUCK_MASK,\ + .curr_table = limits_array,\ + .n_current_limits = ARRAY_SIZE(limits_array),\ + .csel_reg = PV88060_REG_##regl_name##_CONF1,\ + .csel_mask = PV88060_BUCK_ILIM_MASK,\ },\ - .current_limits = limits_array,\ - .n_current_limits = ARRAY_SIZE(limits_array),\ - .limit_mask = PV88060_BUCK_ILIM_MASK, \ .conf = PV88060_REG_##regl_name##_CONF1,\ } @@ -237,9 +206,8 @@ static const struct regulator_ops pv88060_ldo_ops = { .regulators_node = of_match_ptr("regulators"),\ .type = REGULATOR_VOLTAGE,\ .owner = THIS_MODULE,\ - .ops = &pv88060_ldo_ops,\ - .min_uV = max,\ - .uV_step = 0,\ + .ops = &pv88060_sw_ops,\ + .fixed_uV = max,\ .n_voltages = 1,\ .enable_reg = PV88060_REG_##regl_name##_CONF,\ .enable_mask = PV88060_SW_EN,\ diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c index 9a08cb2de501..bdddacdbeb99 100644 --- a/drivers/regulator/pv88080-regulator.c +++ b/drivers/regulator/pv88080-regulator.c @@ -45,12 +45,7 @@ enum pv88080_types { struct pv88080_regulator { struct regulator_desc desc; - /* Current limiting */ - unsigned int n_current_limits; - const int *current_limits; - unsigned int limit_mask; unsigned int mode_reg; - unsigned int limit_reg; unsigned int conf2; unsigned int conf5; }; @@ -102,11 +97,11 @@ static const struct regmap_config pv88080_regmap_config = { * Entry indexes corresponds to register values. */ -static const int pv88080_buck1_limits[] = { +static const unsigned int pv88080_buck1_limits[] = { 3230000, 5130000, 6960000, 8790000 }; -static const int pv88080_buck23_limits[] = { +static const unsigned int pv88080_buck23_limits[] = { 1496000, 2393000, 3291000, 4189000 }; @@ -272,40 +267,6 @@ static int pv88080_buck_set_mode(struct regulator_dev *rdev, PV88080_BUCK1_MODE_MASK, val); } -static int pv88080_set_current_limit(struct regulator_dev *rdev, int min, - int max) -{ - struct pv88080_regulator *info = rdev_get_drvdata(rdev); - int i; - - /* search for closest to maximum */ - for (i = info->n_current_limits; i >= 0; i--) { - if (min <= info->current_limits[i] - && max >= info->current_limits[i]) { - return regmap_update_bits(rdev->regmap, - info->limit_reg, - info->limit_mask, - i << PV88080_BUCK1_ILIM_SHIFT); - } - } - - return -EINVAL; -} - -static int pv88080_get_current_limit(struct regulator_dev *rdev) -{ - struct pv88080_regulator *info = rdev_get_drvdata(rdev); - unsigned int data; - int ret; - - ret = regmap_read(rdev->regmap, info->limit_reg, &data); - if (ret < 0) - return ret; - - data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT; - return info->current_limits[data]; -} - static const struct regulator_ops pv88080_buck_ops = { .get_mode = pv88080_buck_get_mode, .set_mode = pv88080_buck_set_mode, @@ -315,8 +276,8 @@ static const struct regulator_ops pv88080_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = pv88080_set_current_limit, - .get_current_limit = pv88080_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; static const struct regulator_ops pv88080_hvbuck_ops = { @@ -341,9 +302,9 @@ static const struct regulator_ops pv88080_hvbuck_ops = { 
.min_uV = min, \ .uV_step = step, \ .n_voltages = ((max) - (min))/(step) + 1, \ + .curr_table = limits_array, \ + .n_current_limits = ARRAY_SIZE(limits_array), \ },\ - .current_limits = limits_array, \ - .n_current_limits = ARRAY_SIZE(limits_array), \ } #define PV88080_HVBUCK(chip, regl_name, min, step, max) \ @@ -521,9 +482,9 @@ static int pv88080_i2c_probe(struct i2c_client *i2c, if (init_data) config.init_data = &init_data[i]; - pv88080_regulator_info[i].limit_reg + pv88080_regulator_info[i].desc.csel_reg = regmap_config->buck_regmap[i].buck_limit_reg; - pv88080_regulator_info[i].limit_mask + pv88080_regulator_info[i].desc.csel_mask = regmap_config->buck_regmap[i].buck_limit_mask; pv88080_regulator_info[i].mode_reg = regmap_config->buck_regmap[i].buck_mode_reg; diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c index 7a0c15957bd0..6e97cc6df2ee 100644 --- a/drivers/regulator/pv88090-regulator.c +++ b/drivers/regulator/pv88090-regulator.c @@ -42,10 +42,6 @@ enum { struct pv88090_regulator { struct regulator_desc desc; - /* Current limiting */ - unsigned int n_current_limits; - const int *current_limits; - unsigned int limit_mask; unsigned int conf; unsigned int conf2; }; @@ -71,14 +67,14 @@ static const struct regmap_config pv88090_regmap_config = { * Entry indexes corresponds to register values. */ -static const int pv88090_buck1_limits[] = { +static const unsigned int pv88090_buck1_limits[] = { 220000, 440000, 660000, 880000, 1100000, 1320000, 1540000, 1760000, 1980000, 2200000, 2420000, 2640000, 2860000, 3080000, 3300000, 3520000, 3740000, 3960000, 4180000, 4400000, 4620000, 4840000, 5060000, 5280000, 5500000, 5720000, 5940000, 6160000, 6380000, 6600000, 6820000, 7040000 }; -static const int pv88090_buck23_limits[] = { +static const unsigned int pv88090_buck23_limits[] = { 1496000, 2393000, 3291000, 4189000 }; @@ -150,40 +146,6 @@ static int pv88090_buck_set_mode(struct regulator_dev *rdev, PV88090_BUCK1_MODE_MASK, val); } -static int pv88090_set_current_limit(struct regulator_dev *rdev, int min, - int max) -{ - struct pv88090_regulator *info = rdev_get_drvdata(rdev); - int i; - - /* search for closest to maximum */ - for (i = info->n_current_limits; i >= 0; i--) { - if (min <= info->current_limits[i] - && max >= info->current_limits[i]) { - return regmap_update_bits(rdev->regmap, - info->conf, - info->limit_mask, - i << PV88090_BUCK1_ILIM_SHIFT); - } - } - - return -EINVAL; -} - -static int pv88090_get_current_limit(struct regulator_dev *rdev) -{ - struct pv88090_regulator *info = rdev_get_drvdata(rdev); - unsigned int data; - int ret; - - ret = regmap_read(rdev->regmap, info->conf, &data); - if (ret < 0) - return ret; - - data = (data & info->limit_mask) >> PV88090_BUCK1_ILIM_SHIFT; - return info->current_limits[data]; -} - static const struct regulator_ops pv88090_buck_ops = { .get_mode = pv88090_buck_get_mode, .set_mode = pv88090_buck_set_mode, @@ -193,8 +155,8 @@ static const struct regulator_ops pv88090_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = pv88090_set_current_limit, - .get_current_limit = pv88090_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; static const struct regulator_ops pv88090_ldo_ops = { @@ -223,10 +185,11 @@ static const struct regulator_ops pv88090_ldo_ops = { .enable_mask = 
PV88090_##regl_name##_EN, \ .vsel_reg = PV88090_REG_##regl_name##_CONF0, \ .vsel_mask = PV88090_V##regl_name##_MASK, \ + .curr_table = limits_array, \ + .n_current_limits = ARRAY_SIZE(limits_array), \ + .csel_reg = PV88090_REG_##regl_name##_CONF1, \ + .csel_mask = PV88090_##regl_name##_ILIM_MASK, \ },\ - .current_limits = limits_array, \ - .n_current_limits = ARRAY_SIZE(limits_array), \ - .limit_mask = PV88090_##regl_name##_ILIM_MASK, \ .conf = PV88090_REG_##regl_name##_CONF1, \ .conf2 = PV88090_REG_##regl_name##_CONF2, \ } diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index a2fd140eff81..3f53f9134b32 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -40,9 +40,6 @@ struct pwm_regulator_data { /* regulator descriptor */ struct regulator_desc desc; - /* Regulator ops */ - struct regulator_ops ops; - int state; /* Enable GPIO */ @@ -231,7 +228,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev, return 0; } -static struct regulator_ops pwm_regulator_voltage_table_ops = { +static const struct regulator_ops pwm_regulator_voltage_table_ops = { .set_voltage_sel = pwm_regulator_set_voltage_sel, .get_voltage_sel = pwm_regulator_get_voltage_sel, .list_voltage = pwm_regulator_list_voltage, @@ -241,7 +238,7 @@ static struct regulator_ops pwm_regulator_voltage_table_ops = { .is_enabled = pwm_regulator_is_enabled, }; -static struct regulator_ops pwm_regulator_voltage_continuous_ops = { +static const struct regulator_ops pwm_regulator_voltage_continuous_ops = { .get_voltage = pwm_regulator_get_voltage, .set_voltage = pwm_regulator_set_voltage, .enable = pwm_regulator_enable, @@ -249,7 +246,7 @@ static struct regulator_ops pwm_regulator_voltage_continuous_ops = { .is_enabled = pwm_regulator_is_enabled, }; -static struct regulator_desc pwm_regulator_desc = { +static const struct regulator_desc pwm_regulator_desc = { .name = "pwm-regulator", .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, @@ -287,9 +284,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, drvdata->state = -EINVAL; drvdata->duty_cycle_table = duty_cycle_table; - memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops, - sizeof(drvdata->ops)); - drvdata->desc.ops = &drvdata->ops; + drvdata->desc.ops = &pwm_regulator_voltage_table_ops; drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table); return 0; @@ -301,9 +296,7 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev, u32 dutycycle_range[2] = { 0, 100 }; u32 dutycycle_unit = 100; - memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops, - sizeof(drvdata->ops)); - drvdata->desc.ops = &drvdata->ops; + drvdata->desc.ops = &pwm_regulator_voltage_continuous_ops; drvdata->desc.continuous_voltage_range = true; of_property_read_u32_array(pdev->dev.of_node, diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index f5bca77d67c1..68bc23df4213 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -31,6 +31,11 @@ struct qcom_rpm_reg { int is_enabled; int uV; + u32 load; + + unsigned int enabled_updated:1; + unsigned int uv_updated:1; + unsigned int load_updated:1; }; struct rpm_regulator_req { @@ -43,30 +48,59 @@ struct rpm_regulator_req { #define RPM_KEY_UV 0x00007675 /* "uv" */ #define RPM_KEY_MA 0x0000616d /* "ma" */ -static int rpm_reg_write_active(struct qcom_rpm_reg *vreg, - struct rpm_regulator_req *req, - size_t size) +static int rpm_reg_write_active(struct qcom_rpm_reg 
*vreg) { - return qcom_rpm_smd_write(vreg->rpm, - QCOM_SMD_RPM_ACTIVE_STATE, - vreg->type, - vreg->id, - req, size); + struct rpm_regulator_req req[3]; + int reqlen = 0; + int ret; + + if (vreg->enabled_updated) { + req[reqlen].key = cpu_to_le32(RPM_KEY_SWEN); + req[reqlen].nbytes = cpu_to_le32(sizeof(u32)); + req[reqlen].value = cpu_to_le32(vreg->is_enabled); + reqlen++; + } + + if (vreg->uv_updated && vreg->is_enabled) { + req[reqlen].key = cpu_to_le32(RPM_KEY_UV); + req[reqlen].nbytes = cpu_to_le32(sizeof(u32)); + req[reqlen].value = cpu_to_le32(vreg->uV); + reqlen++; + } + + if (vreg->load_updated && vreg->is_enabled) { + req[reqlen].key = cpu_to_le32(RPM_KEY_MA); + req[reqlen].nbytes = cpu_to_le32(sizeof(u32)); + req[reqlen].value = cpu_to_le32(vreg->load / 1000); + reqlen++; + } + + if (!reqlen) + return 0; + + ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + vreg->type, vreg->id, + req, sizeof(req[0]) * reqlen); + if (!ret) { + vreg->enabled_updated = 0; + vreg->uv_updated = 0; + vreg->load_updated = 0; + } + + return ret; } static int rpm_reg_enable(struct regulator_dev *rdev) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; int ret; - req.key = cpu_to_le32(RPM_KEY_SWEN); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = cpu_to_le32(1); + vreg->is_enabled = 1; + vreg->enabled_updated = 1; - ret = rpm_reg_write_active(vreg, &req, sizeof(req)); - if (!ret) - vreg->is_enabled = 1; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->is_enabled = 0; return ret; } @@ -81,16 +115,14 @@ static int rpm_reg_is_enabled(struct regulator_dev *rdev) static int rpm_reg_disable(struct regulator_dev *rdev) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; int ret; - req.key = cpu_to_le32(RPM_KEY_SWEN); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = 0; + vreg->is_enabled = 0; + vreg->enabled_updated = 1; - ret = rpm_reg_write_active(vreg, &req, sizeof(req)); - if (!ret) - vreg->is_enabled = 0; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->is_enabled = 1; return ret; } @@ -108,16 +140,15 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev, unsigned *selector) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; - int ret = 0; + int ret; + int old_uV = vreg->uV; - req.key = cpu_to_le32(RPM_KEY_UV); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = cpu_to_le32(min_uV); + vreg->uV = min_uV; + vreg->uv_updated = 1; - ret = rpm_reg_write_active(vreg, &req, sizeof(req)); - if (!ret) - vreg->uV = min_uV; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->uV = old_uV; return ret; } @@ -125,13 +156,16 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev, static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; + u32 old_load = vreg->load; + int ret; - req.key = cpu_to_le32(RPM_KEY_MA); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = cpu_to_le32(load_uA / 1000); + vreg->load = load_uA; + vreg->load_updated = 1; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->load = old_load; - return rpm_reg_write_active(vreg, &req, sizeof(req)); + return ret; } static const struct regulator_ops rpm_smps_ldo_ops = { diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 213b68743cc8..23713e16c286 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1,5 +1,5 @@ /* - * 
Regulator driver for Rockchip RK808/RK818 + * Regulator driver for Rockchip RK805/RK808/RK818 * * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * @@ -363,28 +363,28 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev) rdev->desc->enable_mask); } -static struct regulator_ops rk805_reg_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .set_suspend_voltage = rk808_set_suspend_voltage, - .set_suspend_enable = rk805_set_suspend_enable, - .set_suspend_disable = rk805_set_suspend_disable, +static const struct regulator_ops rk805_reg_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_voltage = rk808_set_suspend_voltage, + .set_suspend_enable = rk805_set_suspend_enable, + .set_suspend_disable = rk805_set_suspend_disable, }; -static struct regulator_ops rk805_switch_ops = { - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .set_suspend_enable = rk805_set_suspend_enable, - .set_suspend_disable = rk805_set_suspend_disable, +static const struct regulator_ops rk805_switch_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_enable = rk805_set_suspend_enable, + .set_suspend_disable = rk805_set_suspend_disable, }; -static struct regulator_ops rk808_buck1_2_ops = { +static const struct regulator_ops rk808_buck1_2_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap, @@ -399,7 +399,7 @@ static struct regulator_ops rk808_buck1_2_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; -static struct regulator_ops rk808_reg_ops = { +static const struct regulator_ops rk808_reg_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -412,7 +412,7 @@ static struct regulator_ops rk808_reg_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; -static struct regulator_ops rk808_reg_ops_ranges = { +static const struct regulator_ops rk808_reg_ops_ranges = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -425,7 +425,7 @@ static struct regulator_ops rk808_reg_ops_ranges = { .set_suspend_disable = rk808_set_suspend_disable, }; -static struct regulator_ops rk808_switch_ops = { +static const struct regulator_ops rk808_switch_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -433,6 +433,12 @@ static struct regulator_ops rk808_switch_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; +static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(712500, 0, 59, 12500), + REGULATOR_LINEAR_RANGE(1800000, 60, 62, 
200000), + REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0), +}; + static const struct regulator_desc rk805_reg[] = { { .name = "DCDC_REG1", @@ -440,11 +446,11 @@ static const struct regulator_desc rk805_reg[] = { .of_match = of_match_ptr("DCDC_REG1"), .regulators_node = of_match_ptr("regulators"), .id = RK805_ID_DCDC1, - .ops = &rk805_reg_ops, + .ops = &rk808_reg_ops_ranges, .type = REGULATOR_VOLTAGE, - .min_uV = 712500, - .uV_step = 12500, .n_voltages = 64, + .linear_ranges = rk805_buck_1_2_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges), .vsel_reg = RK805_BUCK1_ON_VSEL_REG, .vsel_mask = RK818_BUCK_VSEL_MASK, .enable_reg = RK805_DCDC_EN_REG, @@ -456,11 +462,11 @@ static const struct regulator_desc rk805_reg[] = { .of_match = of_match_ptr("DCDC_REG2"), .regulators_node = of_match_ptr("regulators"), .id = RK805_ID_DCDC2, - .ops = &rk805_reg_ops, + .ops = &rk808_reg_ops_ranges, .type = REGULATOR_VOLTAGE, - .min_uV = 712500, - .uV_step = 12500, .n_voltages = 64, + .linear_ranges = rk805_buck_1_2_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges), .vsel_reg = RK805_BUCK2_ON_VSEL_REG, .vsel_mask = RK818_BUCK_VSEL_MASK, .enable_reg = RK805_DCDC_EN_REG, @@ -796,7 +802,7 @@ static struct platform_driver rk808_regulator_driver = { module_platform_driver(rk808_regulator_driver); -MODULE_DESCRIPTION("regulator driver for the RK808/RK818 series PMICs"); +MODULE_DESCRIPTION("regulator driver for the RK805/RK808/RK818 series PMICs"); MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>"); MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>"); MODULE_AUTHOR("Wadim Egorov <w.egorov@phytec.de>"); diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c index 96d2c18e051a..639cbadc044a 100644 --- a/drivers/regulator/rt5033-regulator.c +++ b/drivers/regulator/rt5033-regulator.c @@ -16,14 +16,14 @@ #include <linux/mfd/rt5033-private.h> #include <linux/regulator/of_regulator.h> -static struct regulator_ops rt5033_safe_ldo_ops = { +static const struct regulator_ops rt5033_safe_ldo_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, }; -static struct regulator_ops rt5033_buck_ops = { +static const struct regulator_ops rt5033_buck_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 095d25f3d2ea..58a1fe583a6c 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = { regulator_desc_ldo(2, STEP_50_MV), regulator_desc_ldo(3, STEP_50_MV), regulator_desc_ldo(4, STEP_50_MV), - regulator_desc_ldo(5, STEP_50_MV), + regulator_desc_ldo(5, STEP_25_MV), regulator_desc_ldo(6, STEP_25_MV), regulator_desc_ldo(7, STEP_50_MV), regulator_desc_ldo(8, STEP_50_MV), regulator_desc_ldo(9, STEP_50_MV), regulator_desc_ldo(10, STEP_50_MV), - regulator_desc_ldo(11, STEP_25_MV), + regulator_desc_ldo(11, STEP_50_MV), regulator_desc_ldo(12, STEP_50_MV), regulator_desc_ldo(13, STEP_50_MV), regulator_desc_ldo(14, STEP_50_MV), @@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = { regulator_desc_ldo(19, STEP_50_MV), regulator_desc_ldo(20, STEP_50_MV), regulator_desc_ldo(21, STEP_50_MV), - regulator_desc_ldo(22, STEP_25_MV), - regulator_desc_ldo(23, STEP_25_MV), + regulator_desc_ldo(22, 
STEP_50_MV), + regulator_desc_ldo(23, STEP_50_MV), regulator_desc_ldo(24, STEP_50_MV), regulator_desc_ldo(25, STEP_50_MV), - regulator_desc_ldo(26, STEP_50_MV), + regulator_desc_ldo(26, STEP_25_MV), regulator_desc_buck1_4(1), regulator_desc_buck1_4(2), regulator_desc_buck1_4(3), diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index ee4a23ab0663..134c62db36c5 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -362,7 +362,7 @@ static const struct regulator_desc s2mps11_regulators[] = { regulator_desc_s2mps11_ldo(32, STEP_50_MV), regulator_desc_s2mps11_ldo(33, STEP_50_MV), regulator_desc_s2mps11_ldo(34, STEP_50_MV), - regulator_desc_s2mps11_ldo(35, STEP_50_MV), + regulator_desc_s2mps11_ldo(35, STEP_25_MV), regulator_desc_s2mps11_ldo(36, STEP_50_MV), regulator_desc_s2mps11_ldo(37, STEP_50_MV), regulator_desc_s2mps11_ldo(38, STEP_50_MV), @@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = { regulator_desc_s2mps11_buck1_4(4), regulator_desc_s2mps11_buck5, regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV), - regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV), - regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV), + regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV), regulator_desc_s2mps11_buck9, regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV), }; diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index b581f01f3395..bb9d1a083299 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -115,7 +115,7 @@ static const struct sec_voltage_desc *reg_voltage_map[] = { [S5M8767_BUCK9] = &buck_voltage_val3, }; -static unsigned int s5m8767_opmode_reg[][4] = { +static const unsigned int s5m8767_opmode_reg[][4] = { /* {OFF, ON, LOWPOWER, SUSPEND} */ /* LDO1 ... 
LDO28 */ {0x0, 0x3, 0x2, 0x1}, /* LDO1 */ @@ -339,13 +339,9 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int new_sel) { struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev); - const struct sec_voltage_desc *desc; - int reg_id = rdev_get_id(rdev); - - desc = reg_voltage_map[reg_id]; if ((old_sel < new_sel) && s5m8767->ramp_delay) - return DIV_ROUND_UP(desc->step * (new_sel - old_sel), + return DIV_ROUND_UP(rdev->desc->uV_step * (new_sel - old_sel), s5m8767->ramp_delay * 1000); return 0; } diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index e0a9c445ed67..ba2f24949dc9 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -15,6 +15,7 @@ #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> +#include <linux/pm_runtime.h> /* STM32 VREFBUF registers */ #define STM32_VREFBUF_CSR 0x00 @@ -25,9 +26,12 @@ #define STM32_HIZ BIT(1) #define STM32_ENVR BIT(0) +#define STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS 10 + struct stm32_vrefbuf { void __iomem *base; struct clk *clk; + struct device *dev; }; static const unsigned int stm32_vrefbuf_voltages[] = { @@ -38,9 +42,16 @@ static const unsigned int stm32_vrefbuf_voltages[] = { static int stm32_vrefbuf_enable(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; int ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); val = (val & ~STM32_HIZ) | STM32_ENVR; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); @@ -59,45 +70,95 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); } + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + return ret; } static int stm32_vrefbuf_disable(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; + int ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); val = (val & ~STM32_ENVR) | STM32_HIZ; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + return 0; } static int stm32_vrefbuf_is_enabled(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + int ret; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ret = readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR; - return readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR; + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + + return ret; } static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; + int ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel); writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); + pm_runtime_mark_last_busy(priv->dev); + 
pm_runtime_put_autosuspend(priv->dev); + return 0; } static int stm32_vrefbuf_get_voltage_sel(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; + int ret; - return FIELD_GET(STM32_VRS, val); + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + ret = FIELD_GET(STM32_VRS, val); + + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + + return ret; } static const struct regulator_ops stm32_vrefbuf_volt_ops = { @@ -130,6 +191,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res); @@ -140,10 +202,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, + STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = clk_prepare_enable(priv->clk); if (ret) { dev_err(&pdev->dev, "clk prepare failed with error %d\n", ret); - return ret; + goto err_pm_stop; } config.dev = &pdev->dev; @@ -161,10 +230,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rdev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; err_clk_dis: clk_disable_unprepare(priv->clk); +err_pm_stop: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); return ret; } @@ -174,12 +250,42 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev) struct regulator_dev *rdev = platform_get_drvdata(pdev); struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + pm_runtime_get_sync(&pdev->dev); regulator_unregister(rdev); clk_disable_unprepare(priv->clk); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); return 0; }; +static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused stm32_vrefbuf_runtime_resume(struct device *dev) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + + return clk_prepare_enable(priv->clk); +} + +static const struct dev_pm_ops stm32_vrefbuf_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(stm32_vrefbuf_runtime_suspend, + stm32_vrefbuf_runtime_resume, + NULL) +}; + static const struct of_device_id stm32_vrefbuf_of_match[] = { { .compatible = "st,stm32-vrefbuf", }, {}, @@ -192,6 +298,7 @@ static struct platform_driver stm32_vrefbuf_driver = { .driver = { .name = "stm32-vrefbuf", .of_match_table = of_match_ptr(stm32_vrefbuf_of_match), + .pm = &stm32_vrefbuf_pm_ops, }, }; module_platform_driver(stm32_vrefbuf_driver); diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c index 16ba0297f709..f09061473613 100644 --- 
a/drivers/regulator/stpmic1_regulator.c +++ b/drivers/regulator/stpmic1_regulator.c @@ -12,8 +12,10 @@ #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> +#include <dt-bindings/mfd/st,stpmic1.h> + /** - * stpmic1 regulator description + * stpmic1 regulator description: this structure is used as driver data * @desc: regulator framework description * @mask_reset_reg: mask reset register address * @mask_reset_mask: mask rank and mask reset register mask @@ -28,28 +30,9 @@ struct stpmic1_regulator_cfg { u8 icc_mask; }; -/** - * stpmic1 regulator data: this structure is used as driver data - * @regul_id: regulator id - * @reg_node: DT node of regulator (unused on non-DT platforms) - * @cfg: stpmic specific regulator description - * @mask_reset: mask_reset bit value - * @irq_curlim: current limit interrupt number - * @regmap: point to parent regmap structure - */ -struct stpmic1_regulator { - unsigned int regul_id; - struct device_node *reg_node; - struct stpmic1_regulator_cfg *cfg; - u8 mask_reset; - int irq_curlim; - struct regmap *regmap; -}; - static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode); static unsigned int stpmic1_get_mode(struct regulator_dev *rdev); static int stpmic1_set_icc(struct regulator_dev *rdev); -static int stpmic1_regulator_parse_dt(void *driver_data); static unsigned int stpmic1_map_mode(unsigned int mode); enum { @@ -72,15 +55,13 @@ enum { /* Enable time worst case is 5000mV/(2250uV/uS) */ #define PMIC_ENABLE_TIME_US 2200 -#define STPMIC1_BUCK_MODE_NORMAL 0 -#define STPMIC1_BUCK_MODE_LP BUCK_HPLP_ENABLE_MASK - -struct regulator_linear_range buck1_ranges[] = { - REGULATOR_LINEAR_RANGE(600000, 0, 30, 25000), - REGULATOR_LINEAR_RANGE(1350000, 31, 63, 0), +static const struct regulator_linear_range buck1_ranges[] = { + REGULATOR_LINEAR_RANGE(725000, 0, 4, 0), + REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000), + REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0), }; -struct regulator_linear_range buck2_ranges[] = { +static const struct regulator_linear_range buck2_ranges[] = { REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0), REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0), REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0), @@ -94,7 +75,7 @@ struct regulator_linear_range buck2_ranges[] = { REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0), }; -struct regulator_linear_range buck3_ranges[] = { +static const struct regulator_linear_range buck3_ranges[] = { REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0), REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0), REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0), @@ -102,10 +83,9 @@ struct regulator_linear_range buck3_ranges[] = { REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0), REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000), REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0), - }; -struct regulator_linear_range buck4_ranges[] = { +static const struct regulator_linear_range buck4_ranges[] = { REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000), REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0), REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0), @@ -113,24 +93,21 @@ struct regulator_linear_range buck4_ranges[] = { REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0), REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000), REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0), - }; -struct regulator_linear_range ldo1_ranges[] = { +static const struct regulator_linear_range ldo1_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0), - }; -struct regulator_linear_range ldo2_ranges[] = { +static const struct 
regulator_linear_range ldo2_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0), - }; -struct regulator_linear_range ldo3_ranges[] = { +static const struct regulator_linear_range ldo3_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0), @@ -138,18 +115,18 @@ struct regulator_linear_range ldo3_ranges[] = { REGULATOR_LINEAR_RANGE(500000, 31, 31, 0), }; -struct regulator_linear_range ldo5_ranges[] = { +static const struct regulator_linear_range ldo5_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000), REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0), }; -struct regulator_linear_range ldo6_ranges[] = { +static const struct regulator_linear_range ldo6_ranges[] = { REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0), }; -static struct regulator_ops stpmic1_ldo_ops = { +static const struct regulator_ops stpmic1_ldo_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .is_enabled = regulator_is_enabled_regmap, @@ -157,11 +134,10 @@ static struct regulator_ops stpmic1_ldo_ops = { .disable = regulator_disable_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_pull_down = regulator_set_pull_down_regmap, .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_ldo3_ops = { +static const struct regulator_ops stpmic1_ldo3_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_iterate, .is_enabled = regulator_is_enabled_regmap, @@ -169,21 +145,19 @@ static struct regulator_ops stpmic1_ldo3_ops = { .disable = regulator_disable_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_pull_down = regulator_set_pull_down_regmap, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_ldo4_fixed_regul_ops = { +static const struct regulator_ops stpmic1_ldo4_fixed_regul_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .set_pull_down = regulator_set_pull_down_regmap, .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_buck_ops = { +static const struct regulator_ops stpmic1_buck_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .is_enabled = regulator_is_enabled_regmap, @@ -197,20 +171,27 @@ static struct regulator_ops stpmic1_buck_ops = { .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_vref_ddr_ops = { +static const struct regulator_ops stpmic1_vref_ddr_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .set_pull_down = regulator_set_pull_down_regmap, }; -static struct regulator_ops stpmic1_switch_regul_ops = { +static const struct regulator_ops stpmic1_boost_regul_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .set_over_current_protection = stpmic1_set_icc, }; +static const struct regulator_ops 
stpmic1_switch_regul_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .set_over_current_protection = stpmic1_set_icc, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + #define REG_LDO(ids, base) { \ .name = #ids, \ .id = STPMIC1_##ids, \ @@ -227,8 +208,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .enable_val = 1, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } @@ -252,8 +231,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .bypass_mask = LDO_BYPASS_MASK, \ .bypass_val_on = LDO_BYPASS_MASK, \ .bypass_val_off = 0, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } @@ -271,8 +248,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .enable_val = 1, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } @@ -312,12 +287,47 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .enable_val = 1, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } -#define REG_SWITCH(ids, base, reg, mask, val) { \ +#define REG_BOOST(ids, base) { \ + .name = #ids, \ + .id = STPMIC1_##ids, \ + .n_voltages = 1, \ + .ops = &stpmic1_boost_regul_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 0, \ + .fixed_uV = 5000000, \ + .enable_reg = BST_SW_CR, \ + .enable_mask = BOOST_ENABLED, \ + .enable_val = BOOST_ENABLED, \ + .disable_val = 0, \ + .enable_time = PMIC_ENABLE_TIME_US, \ + .supply_name = #base, \ +} + +#define REG_VBUS_OTG(ids, base) { \ + .name = #ids, \ + .id = STPMIC1_##ids, \ + .n_voltages = 1, \ + .ops = &stpmic1_switch_regul_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 0, \ + .fixed_uV = 5000000, \ + .enable_reg = BST_SW_CR, \ + .enable_mask = USBSW_OTG_SWITCH_ENABLED, \ + .enable_val = USBSW_OTG_SWITCH_ENABLED, \ + .disable_val = 0, \ + .enable_time = PMIC_ENABLE_TIME_US, \ + .supply_name = #base, \ + .active_discharge_reg = BST_SW_CR, \ + .active_discharge_mask = VBUS_OTG_DISCHARGE, \ + .active_discharge_on = VBUS_OTG_DISCHARGE, \ +} + +#define REG_SW_OUT(ids, base) { \ .name = #ids, \ .id = STPMIC1_##ids, \ .n_voltages = 1, \ @@ -326,15 +336,18 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .owner = THIS_MODULE, \ .min_uV = 0, \ .fixed_uV = 5000000, \ - .enable_reg = (reg), \ - .enable_mask = (mask), \ - .enable_val = (val), \ + .enable_reg = BST_SW_CR, \ + .enable_mask = SWIN_SWOUT_ENABLED, \ + .enable_val = SWIN_SWOUT_ENABLED, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ .supply_name = #base, \ + .active_discharge_reg = BST_SW_CR, \ + .active_discharge_mask = SW_OUT_DISCHARGE, \ + .active_discharge_on = SW_OUT_DISCHARGE, \ } -struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = { +static const struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = { [STPMIC1_BUCK1] = { .desc = REG_BUCK(BUCK1, buck1), .icc_reg = BUCKS_ICCTO_CR, @@ -411,23 +424,17 @@ struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = { .mask_reset_mask = BIT(6), }, [STPMIC1_BOOST] = { - .desc = REG_SWITCH(BOOST, boost, BST_SW_CR, - BOOST_ENABLED, - BOOST_ENABLED), + .desc = 
REG_BOOST(BOOST, boost), .icc_reg = BUCKS_ICCTO_CR, .icc_mask = BIT(6), }, [STPMIC1_VBUS_OTG] = { - .desc = REG_SWITCH(VBUS_OTG, pwr_sw1, BST_SW_CR, - USBSW_OTG_SWITCH_ENABLED, - USBSW_OTG_SWITCH_ENABLED), + .desc = REG_VBUS_OTG(VBUS_OTG, pwr_sw1), .icc_reg = BUCKS_ICCTO_CR, .icc_mask = BIT(4), }, [STPMIC1_SW_OUT] = { - .desc = REG_SWITCH(SW_OUT, pwr_sw2, BST_SW_CR, - SWIN_SWOUT_ENABLED, - SWIN_SWOUT_ENABLED), + .desc = REG_SW_OUT(SW_OUT, pwr_sw2), .icc_reg = BUCKS_ICCTO_CR, .icc_mask = BIT(5), }, @@ -448,8 +455,9 @@ static unsigned int stpmic1_map_mode(unsigned int mode) static unsigned int stpmic1_get_mode(struct regulator_dev *rdev) { int value; + struct regmap *regmap = rdev_get_regmap(rdev); - regmap_read(rdev->regmap, rdev->desc->enable_reg, &value); + regmap_read(regmap, rdev->desc->enable_reg, &value); if (value & STPMIC1_BUCK_MODE_LP) return REGULATOR_MODE_STANDBY; @@ -460,6 +468,7 @@ static unsigned int stpmic1_get_mode(struct regulator_dev *rdev) static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode) { int value; + struct regmap *regmap = rdev_get_regmap(rdev); switch (mode) { case REGULATOR_MODE_NORMAL: @@ -472,17 +481,18 @@ static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode) return -EINVAL; } - return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + return regmap_update_bits(regmap, rdev->desc->enable_reg, STPMIC1_BUCK_MODE_LP, value); } static int stpmic1_set_icc(struct regulator_dev *rdev) { - struct stpmic1_regulator *regul = rdev_get_drvdata(rdev); + struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev); + struct regmap *regmap = rdev_get_regmap(rdev); /* enable switch off in case of over current */ - return regmap_update_bits(regul->regmap, regul->cfg->icc_reg, - regul->cfg->icc_mask, regul->cfg->icc_mask); + return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask, + cfg->icc_mask); } static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data) @@ -501,46 +511,13 @@ static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int stpmic1_regulator_init(struct platform_device *pdev, - struct regulator_dev *rdev) -{ - struct stpmic1_regulator *regul = rdev_get_drvdata(rdev); - int ret = 0; - - /* set mask reset */ - if (regul->mask_reset && regul->cfg->mask_reset_reg != 0) { - ret = regmap_update_bits(regul->regmap, - regul->cfg->mask_reset_reg, - regul->cfg->mask_reset_mask, - regul->cfg->mask_reset_mask); - if (ret) { - dev_err(&pdev->dev, "set mask reset failed\n"); - return ret; - } - } - - /* setup an irq handler for over-current detection */ - if (regul->irq_curlim > 0) { - ret = devm_request_threaded_irq(&pdev->dev, - regul->irq_curlim, NULL, - stpmic1_curlim_irq_handler, - IRQF_ONESHOT | IRQF_SHARED, - pdev->name, rdev); - if (ret) { - dev_err(&pdev->dev, "Request IRQ failed\n"); - return ret; - } - } - return 0; -} - #define MATCH(_name, _id) \ [STPMIC1_##_id] = { \ .name = #_name, \ .desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \ } -static struct of_regulator_match stpmic1_regulators_matches[] = { +static struct of_regulator_match stpmic1_matches[] = { MATCH(buck1, BUCK1), MATCH(buck2, BUCK2), MATCH(buck3, BUCK3), @@ -557,94 +534,75 @@ static struct of_regulator_match stpmic1_regulators_matches[] = { MATCH(pwr_sw2, SW_OUT), }; -static int stpmic1_regulator_parse_dt(void *driver_data) -{ - struct stpmic1_regulator *regul = - (struct stpmic1_regulator *)driver_data; - - if (!regul) - return -EINVAL; - - if (of_get_property(regul->reg_node, 
"st,mask-reset", NULL)) - regul->mask_reset = 1; - - regul->irq_curlim = of_irq_get(regul->reg_node, 0); - - return 0; -} - -static struct -regulator_dev *stpmic1_regulator_register(struct platform_device *pdev, int id, - struct regulator_init_data *init_data, - struct stpmic1_regulator *regul) +static int stpmic1_regulator_register(struct platform_device *pdev, int id, + struct of_regulator_match *match, + const struct stpmic1_regulator_cfg *cfg) { struct stpmic1 *pmic_dev = dev_get_drvdata(pdev->dev.parent); struct regulator_dev *rdev; struct regulator_config config = {}; + int ret = 0; + int irq; config.dev = &pdev->dev; - config.init_data = init_data; - config.of_node = stpmic1_regulators_matches[id].of_node; + config.init_data = match->init_data; + config.of_node = match->of_node; config.regmap = pmic_dev->regmap; - config.driver_data = regul; - - regul->regul_id = id; - regul->reg_node = config.of_node; - regul->cfg = &stpmic1_regulator_cfgs[id]; - regul->regmap = pmic_dev->regmap; + config.driver_data = (void *)cfg; - rdev = devm_regulator_register(&pdev->dev, ®ul->cfg->desc, &config); + rdev = devm_regulator_register(&pdev->dev, &cfg->desc, &config); if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register %s regulator\n", - regul->cfg->desc.name); + cfg->desc.name); + return PTR_ERR(rdev); + } + + /* set mask reset */ + if (of_get_property(config.of_node, "st,mask-reset", NULL) && + cfg->mask_reset_reg != 0) { + ret = regmap_update_bits(pmic_dev->regmap, + cfg->mask_reset_reg, + cfg->mask_reset_mask, + cfg->mask_reset_mask); + if (ret) { + dev_err(&pdev->dev, "set mask reset failed\n"); + return ret; + } } - return rdev; + /* setup an irq handler for over-current detection */ + irq = of_irq_get(config.of_node, 0); + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, + irq, NULL, + stpmic1_curlim_irq_handler, + IRQF_ONESHOT | IRQF_SHARED, + pdev->name, rdev); + if (ret) { + dev_err(&pdev->dev, "Request IRQ failed\n"); + return ret; + } + } + return 0; } static int stpmic1_regulator_probe(struct platform_device *pdev) { - struct regulator_dev *rdev; - struct stpmic1_regulator *regul; - struct regulator_init_data *init_data; - struct device_node *np; int i, ret; - np = pdev->dev.of_node; - - ret = of_regulator_match(&pdev->dev, np, - stpmic1_regulators_matches, - ARRAY_SIZE(stpmic1_regulators_matches)); + ret = of_regulator_match(&pdev->dev, pdev->dev.of_node, stpmic1_matches, + ARRAY_SIZE(stpmic1_matches)); if (ret < 0) { dev_err(&pdev->dev, "Error in PMIC regulator device tree node"); return ret; } - regul = devm_kzalloc(&pdev->dev, ARRAY_SIZE(stpmic1_regulator_cfgs) * - sizeof(struct stpmic1_regulator), - GFP_KERNEL); - if (!regul) - return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) { - /* Parse DT & find regulators to register */ - init_data = stpmic1_regulators_matches[i].init_data; - if (init_data) - init_data->regulator_init = &stpmic1_regulator_parse_dt; - - rdev = stpmic1_regulator_register(pdev, i, init_data, regul); - if (IS_ERR(rdev)) - return PTR_ERR(rdev); - - ret = stpmic1_regulator_init(pdev, rdev); - if (ret) { - dev_err(&pdev->dev, - "failed to initialize regulator %d\n", ret); + ret = stpmic1_regulator_register(pdev, i, &stpmic1_matches[i], + &stpmic1_regulator_cfgs[i]); + if (ret < 0) return ret; - } - - regul++; } dev_dbg(&pdev->dev, "stpmic1_regulator driver probed\n"); diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c index 6209beee1018..95708d34876b 100644 --- 
a/drivers/regulator/tps65218-regulator.c +++ b/drivers/regulator/tps65218-regulator.c @@ -188,7 +188,8 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = { .set_suspend_disable = tps65218_pmic_set_suspend_disable, }; -static const int ls3_currents[] = { 100, 200, 500, 1000 }; +static const int ls3_currents[] = { 100000, 200000, 500000, 1000000 }; + static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev, int lim_uA) @@ -204,7 +205,8 @@ static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev, return -EINVAL; return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask, - index << 2, TPS65218_PROTECT_L1); + index << __builtin_ctz(dev->desc->csel_mask), + TPS65218_PROTECT_L1); } static int tps65218_pmic_set_current_limit(struct regulator_dev *dev, @@ -214,7 +216,7 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev, unsigned int num_currents = ARRAY_SIZE(ls3_currents); struct tps65218 *tps = rdev_get_drvdata(dev); - while (index < num_currents && ls3_currents[index] < max_uA) + while (index < num_currents && ls3_currents[index] <= max_uA) index++; index--; @@ -223,7 +225,8 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev, return -EINVAL; return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask, - index << 2, TPS65218_PROTECT_L1); + index << __builtin_ctz(dev->desc->csel_mask), + TPS65218_PROTECT_L1); } static int tps65218_pmic_get_current_limit(struct regulator_dev *dev) @@ -236,12 +239,13 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev) if (retval < 0) return retval; - index = (index & dev->desc->csel_mask) >> 2; + index = (index & dev->desc->csel_mask) >> + __builtin_ctz(dev->desc->csel_mask); return ls3_currents[index]; } -static struct regulator_ops tps65218_ls3_ops = { +static struct regulator_ops tps65218_ls23_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = tps65218_pmic_enable, .disable = tps65218_pmic_disable, @@ -303,8 +307,13 @@ static const struct regulator_desc regulators[] = { TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges, 2, 0, 0, TPS65218_REG_SEQ6, TPS65218_SEQ6_LDO1_SEQ_MASK), + TPS65218_REGULATOR("LS2", "regulator-ls2", TPS65218_LS_2, + REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0, + TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS2_EN, + TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS2ILIM_MASK, + NULL, 0, 0, 0, 0, 0), TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3, - REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0, + REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0, TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0), diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 884c7505ed91..402ea43c77d1 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -576,14 +576,9 @@ static int twlreg_probe(struct platform_device *pdev) struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; - const struct of_device_id *match; struct regulator_config config = { }; - match = of_match_device(twl_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - template = match->data; + template = of_device_get_match_data(&pdev->dev); if (!template) return -ENODEV; diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 219cbd910dbf..15f19df6bc5d 100644 --- a/drivers/regulator/twl6030-regulator.c +++ 
b/drivers/regulator/twl6030-regulator.c @@ -31,9 +31,6 @@ struct twlreg_info { /* twl resource ID, for resource control state machine */ u8 id; - /* chip constraints on regulator behavior */ - u16 min_mV; - u8 flags; /* used by regulator core */ @@ -247,32 +244,11 @@ static int twl6030coresmps_get_voltage(struct regulator_dev *rdev) return -ENODEV; } -static struct regulator_ops twl6030coresmps_ops = { +static const struct regulator_ops twl6030coresmps_ops = { .set_voltage = twl6030coresmps_set_voltage, .get_voltage = twl6030coresmps_get_voltage, }; -static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - switch (sel) { - case 0: - return 0; - case 1 ... 24: - /* Linear mapping from 00000001 to 00011000: - * Absolute voltage value = 1.0 V + 0.1 V Ă— (sel – 00000001) - */ - return (info->min_mV + 100 * (sel - 1)) * 1000; - case 25 ... 30: - return -EINVAL; - case 31: - return 2750000; - default: - return -EINVAL; - } -} - static int twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { @@ -290,8 +266,8 @@ static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev) return vsel; } -static struct regulator_ops twl6030ldo_ops = { - .list_voltage = twl6030ldo_list_voltage, +static const struct regulator_ops twl6030ldo_ops = { + .list_voltage = regulator_list_voltage_linear_range, .set_voltage_sel = twl6030ldo_set_voltage_sel, .get_voltage_sel = twl6030ldo_get_voltage_sel, @@ -305,7 +281,7 @@ static struct regulator_ops twl6030ldo_ops = { .get_status = twl6030reg_get_status, }; -static struct regulator_ops twl6030fixed_ops = { +static const struct regulator_ops twl6030fixed_ops = { .list_voltage = regulator_list_voltage_linear, .enable = twl6030reg_enable, @@ -496,7 +472,7 @@ static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS); } -static struct regulator_ops twlsmps_ops = { +static const struct regulator_ops twlsmps_ops = { .list_voltage = twl6030smps_list_voltage, .map_voltage = twl6030smps_map_voltage, @@ -513,6 +489,11 @@ static struct regulator_ops twlsmps_ops = { }; /*----------------------------------------------------------------------*/ +static const struct regulator_linear_range twl6030ldo_linear_range[] = { + REGULATOR_LINEAR_RANGE(0, 0, 0, 0), + REGULATOR_LINEAR_RANGE(1000000, 1, 24, 100000), + REGULATOR_LINEAR_RANGE(2750000, 31, 31, 0), +}; #define TWL6030_ADJUSTABLE_SMPS(label) \ static const struct twlreg_info TWL6030_INFO_##label = { \ @@ -525,28 +506,30 @@ static const struct twlreg_info TWL6030_INFO_##label = { \ }, \ } -#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts) \ +#define TWL6030_ADJUSTABLE_LDO(label, offset) \ static const struct twlreg_info TWL6030_INFO_##label = { \ .base = offset, \ - .min_mV = min_mVolts, \ .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ .n_voltages = 32, \ + .linear_ranges = twl6030ldo_linear_range, \ + .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ } -#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts) \ +#define TWL6032_ADJUSTABLE_LDO(label, offset) \ static const struct twlreg_info TWL6032_INFO_##label = { \ .base = offset, \ - .min_mV = min_mVolts, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ .n_voltages = 32, \ + .linear_ranges = twl6030ldo_linear_range, \ + .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \ .ops = 
&twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ @@ -557,7 +540,6 @@ static const struct twlreg_info TWL6032_INFO_##label = { \ static const struct twlreg_info TWLFIXED_INFO_##label = { \ .base = offset, \ .id = 0, \ - .min_mV = mVolts, \ .desc = { \ .name = #label, \ .id = TWL6030##_REG_##label, \ @@ -574,7 +556,6 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \ #define TWL6032_ADJUSTABLE_SMPS(label, offset) \ static const struct twlreg_info TWLSMPS_INFO_##label = { \ .base = offset, \ - .min_mV = 600, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ @@ -592,22 +573,22 @@ static const struct twlreg_info TWLSMPS_INFO_##label = { \ TWL6030_ADJUSTABLE_SMPS(VDD1); TWL6030_ADJUSTABLE_SMPS(VDD2); TWL6030_ADJUSTABLE_SMPS(VDD3); -TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000); -TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000); -TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000); -TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000); -TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000); -TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000); +TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54); +TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58); +TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c); +TWL6030_ADJUSTABLE_LDO(VMMC, 0x68); +TWL6030_ADJUSTABLE_LDO(VPP, 0x6c); +TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74); /* 6025 are renamed compared to 6030 versions */ -TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000); -TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000); -TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000); -TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000); -TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000); -TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000); -TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000); -TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000); -TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000); +TWL6032_ADJUSTABLE_LDO(LDO2, 0x54); +TWL6032_ADJUSTABLE_LDO(LDO4, 0x58); +TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c); +TWL6032_ADJUSTABLE_LDO(LDO5, 0x68); +TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c); +TWL6032_ADJUSTABLE_LDO(LDO7, 0x74); +TWL6032_ADJUSTABLE_LDO(LDO6, 0x60); +TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64); +TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70); TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0); TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0); TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); @@ -687,14 +668,9 @@ static int twlreg_probe(struct platform_device *pdev) struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; - const struct of_device_id *match; struct regulator_config config = { }; - match = of_match_device(twl_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - template = match->data; + template = of_device_get_match_data(&pdev->dev); if (!template) return -ENODEV; diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c index abf22acbd13e..9026d5a3e964 100644 --- a/drivers/regulator/uniphier-regulator.c +++ b/drivers/regulator/uniphier-regulator.c @@ -32,7 +32,7 @@ struct uniphier_regulator_priv { const struct uniphier_regulator_soc_data *data; }; -static struct regulator_ops uniphier_regulator_ops = { +static const struct regulator_ops uniphier_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -87,8 +87,10 @@ static int uniphier_regulator_probe(struct platform_device *pdev) } regmap = devm_regmap_init_mmio(dev, base, priv->data->regconf); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto out_rst_assert; + } config.dev = dev; config.driver_data = priv; diff --git 
a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 5a5bc4bb08d2..12b422373580 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -205,33 +205,10 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data) * BUCKV specifics */ -static int wm831x_buckv_list_voltage(struct regulator_dev *rdev, - unsigned selector) -{ - if (selector <= 0x8) - return 600000; - if (selector <= WM831X_BUCKV_MAX_SELECTOR) - return 600000 + ((selector - 0x8) * 12500); - return -EINVAL; -} - -static int wm831x_buckv_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - u16 vsel; - - if (min_uV < 600000) - vsel = 0; - else if (min_uV <= 1800000) - vsel = DIV_ROUND_UP(min_uV - 600000, 12500) + 8; - else - return -EINVAL; - - if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV) - return -EINVAL; - - return vsel; -} +static const struct regulator_linear_range wm831x_buckv_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 0x7, 0), + REGULATOR_LINEAR_RANGE(600000, 0x8, 0x68, 12500), +}; static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) { @@ -309,7 +286,7 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; int vsel; - vsel = wm831x_buckv_map_voltage(rdev, uV, uV); + vsel = regulator_map_voltage_linear_range(rdev, uV, uV); if (vsel < 0) return vsel; @@ -327,52 +304,18 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev) } /* Current limit options */ -static u16 wm831x_dcdc_ilim[] = { - 125, 250, 375, 500, 625, 750, 875, 1000 +static const unsigned int wm831x_dcdc_ilim[] = { + 125000, 250000, 375000, 500000, 625000, 750000, 875000, 1000000 }; -static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); - struct wm831x *wm831x = dcdc->wm831x; - u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; - int i; - - for (i = ARRAY_SIZE(wm831x_dcdc_ilim) - 1; i >= 0; i--) { - if ((min_uA <= wm831x_dcdc_ilim[i]) && - (wm831x_dcdc_ilim[i] <= max_uA)) - return wm831x_set_bits(wm831x, reg, - WM831X_DC1_HC_THR_MASK, - i << WM831X_DC1_HC_THR_SHIFT); - } - - return -EINVAL; -} - -static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) -{ - struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); - struct wm831x *wm831x = dcdc->wm831x; - u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; - int val; - - val = wm831x_reg_read(wm831x, reg); - if (val < 0) - return val; - - val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT; - return wm831x_dcdc_ilim[val]; -} - static const struct regulator_ops wm831x_buckv_ops = { .set_voltage_sel = wm831x_buckv_set_voltage_sel, .get_voltage_sel = wm831x_buckv_get_voltage_sel, - .list_voltage = wm831x_buckv_list_voltage, - .map_voltage = wm831x_buckv_map_voltage, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .set_suspend_voltage = wm831x_buckv_set_suspend_voltage, - .set_current_limit = wm831x_buckv_set_current_limit, - .get_current_limit = wm831x_buckv_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, @@ -492,10 +435,16 @@ static int wm831x_buckv_probe(struct platform_device *pdev) dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1; + 
dcdc->desc.linear_ranges = wm831x_buckv_ranges; + dcdc->desc.n_linear_ranges = ARRAY_SIZE(wm831x_buckv_ranges); dcdc->desc.ops = &wm831x_buckv_ops; dcdc->desc.owner = THIS_MODULE; dcdc->desc.enable_reg = WM831X_DCDC_ENABLE; dcdc->desc.enable_mask = 1 << id; + dcdc->desc.csel_reg = dcdc->base + WM831X_DCDC_CONTROL_2; + dcdc->desc.csel_mask = WM831X_DC1_HC_THR_MASK; + dcdc->desc.n_current_limits = ARRAY_SIZE(wm831x_dcdc_ilim); + dcdc->desc.curr_table = wm831x_dcdc_ilim; ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); if (ret < 0) { diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index a3c20e3a8b7c..3337b1e80412 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) struct Scsi_Host *host = NULL; TW_Device_Extension *tw_dev; unsigned long mem_addr, mem_len; - int retval = -ENODEV; + int retval; retval = pci_enable_device(pdev); if (retval) { @@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; @@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index cd096104bcec..dda6fa857709 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1573,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; @@ -1805,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 07efcb9b5b94..bbdae67774f0 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, 
const struct pci_device_id *id) if (err) goto Err_remove; - err = -ENODEV; - if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) { + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); + if (err) { + err = -ENODEV; asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); goto Err_remove; } diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 42a0caf6740d..88880a66a189 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad) int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { - int rc = -ENODEV; + int rc = -ENODEV; if (pci_enable_device(pdev)) { printk(KERN_ERR "pci_enable_device fail %p\n", pdev); @@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) pci_set_master(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc) { + rc = -ENODEV; printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev); goto out_release_region; } @@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev) { struct bfad_s *bfad = pci_get_drvdata(pdev); u8 byte; + int rc; dev_printk(KERN_ERR, &pdev->dev, "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags); @@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); pci_set_master(pdev); - if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32))) + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, + DMA_BIT_MASK(32)); + if (rc) goto out_disable_device; if (restart_bfa(bfad) == -1) diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index cf629380a981..616b25bf7941 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rv) + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rv) { + rv = -ENODEV; dev_err(&pdev->dev, "No suitable DMA available.\n"); goto err_release_regions; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index eed7fc5b3389..bc17fa0d8375 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; + int error; shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); if (!shost) { @@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (error) + error = 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (error) { dev_err(dev, "No usable DMA addressing method\n"); goto err_out; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index c92b3822c408..e0570fd8466e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err_out_disable_device; - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) { dev_err(dev, "No usable DMA addressing method\n"); - rc = -EIO; + rc = -ENODEV; goto err_out_regions; } diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 3eedfd4f8f57..251c084a6ff0 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) dma_addr_t start_phy; void *start_virt; u32 offset, i, req_size; + int rc; dprintk("hptiop_probe(%p)\n", pcidev); @@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) /* Enable 64bit DMA if possible */ iop_ops = (struct hptiop_adapter_ops *)id->driver_data; - if (dma_set_mask(&pcidev->dev, - DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) || - dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask(&pcidev->dev, + DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)); + if (rc) + rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)); + + if (rc) { printk(KERN_ERR "hptiop: fail to set dma_mask\n"); goto disable_pci_device; } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b8d325ce8754..120fc520f27a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) return -ENODATA; + spin_lock_bh(&conn->session->back_lock); + if (conn->task == NULL) { + spin_unlock_bh(&conn->session->back_lock); + return -ENODATA; + } __iscsi_get_task(task); + spin_unlock_bh(&conn->session->back_lock); spin_unlock_bh(&conn->session->frwd_lock); rc = conn->session->tt->xmit_task(task); spin_lock_bh(&conn->session->frwd_lock); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 17eb4185f29d..f21c93bbb35c 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev( rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; + rphy->identify.phy_identifier = phy_id; child->rphy = rphy; get_device(&rphy->dev); @@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev( child->rphy = rphy; get_device(&rphy->dev); + rphy->identify.phy_identifier = phy_id; sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index bede11e16349..e1129260ed18 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) unsigned long bar0map_len, bar2map_len; int i, hbq_count; void *ptr; - int error = -ENODEV; + int error; if (!pdev) - return error; + return -ENODEV; /* Set the device DMA 
mask size */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) return error; + error = -ENODEV; /* Get the bus address of Bar0 and Bar2 and the number of bytes * required by each mapping. @@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) uint32_t if_type; if (!pdev) - return error; + return -ENODEV; /* Set the device DMA mask size */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) return error; /* diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d65ac584eba..a6828391d6b3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) set_host_byte(cmd, DID_OK); return BLK_STS_TARGET; case DID_NEXUS_FAILURE: + set_host_byte(cmd, DID_OK); return BLK_STS_NEXUS; case DID_ALLOC_FAILURE: set_host_byte(cmd, DID_OK); @@ -2597,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); - WARN_ON_ONCE(!sdev->quiesced_by); sdev->quiesced_by = NULL; blk_clear_pm_only(sdev->request_queue); if (sdev->sdev_state == SDEV_QUIESCE) diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index fff86940388b..a340af797a85 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, return -EOPNOTSUPP; /* - * Get a reply buffer for the number of requested zones plus a header. - * For ATA, buffers must be aligned to 512B. + * Get a reply buffer for the number of requested zones plus a header, + * without exceeding the device maximum command size. For ATA disks, + * buffers must be aligned to 512B. */ - buflen = roundup((nrz + 1) * 64, 512); + buflen = min(queue_max_hw_sectors(disk->queue) << 9, + roundup((nrz + 1) * 64, 512)); buf = kmalloc(buflen, gfp_mask); if (!buf) return -ENOMEM; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 9f89cb134549..f761655e2a36 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -63,7 +63,7 @@ config SPI_ALTERA config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" - depends on ATH79 && GPIOLIB + depends on ATH79 || COMPILE_TEST select SPI_BITBANG help This enables support for the SPI controller present on the @@ -268,6 +268,27 @@ config SPI_FSL_LPSPI help This enables Freescale i.MX LPSPI controllers in master mode. +config SPI_FSL_QUADSPI + tristate "Freescale QSPI controller" + depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST + depends on HAS_IOMEM + help + This enables support for the Quad SPI controller in master mode. + Up to four flash chips can be connected on two buses with two + chipselects each. + This controller does not support generic SPI messages. It only + supports the high-level SPI memory interface. + +config SPI_NXP_FLEXSPI + tristate "NXP Flex SPI controller" + depends on ARCH_LAYERSCAPE || HAS_IOMEM + help + This enables support for the Flex SPI controller in master mode. 
+ Up to four slave devices can be connected on two buses with two + chipselects each. + This controller does not support generic SPI messages and only + supports the high-level SPI memory interface. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GPIOLIB || COMPILE_TEST @@ -296,8 +317,7 @@ config SPI_IMX depends on ARCH_MXC || COMPILE_TEST select SPI_BITBANG help - This enables using the Freescale i.MX SPI controllers in master - mode. + This enables support for the Freescale i.MX SPI controllers. config SPI_JCORE tristate "J-Core SPI Master" @@ -372,7 +392,7 @@ config SPI_FSL_DSPI depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST help This enables support for the Freescale DSPI controller in master - mode. VF610 platform uses the controller. + mode. VF610, LS1021A and ColdFire platforms uses the controller. config SPI_FSL_ESPI tristate "Freescale eSPI controller" @@ -631,6 +651,12 @@ config SPI_SH_HSPI help SPI driver for SuperH HSPI blocks. +config SPI_SIFIVE + tristate "SiFive SPI controller" + depends on HAS_IOMEM + help + This exposes the SPI controller IP from SiFive. + config SPI_SIRF tristate "CSR SiRFprimaII SPI controller" depends on SIRF_DMA @@ -665,7 +691,7 @@ config SPI_STM32 tristate "STMicroelectronics STM32 SPI controller" depends on ARCH_STM32 || COMPILE_TEST help - SPI driver for STMicroelectonics STM32 SoCs. + SPI driver for STMicroelectronics STM32 SoCs. STM32 SPI controller supports DMA and PIO modes. When DMA is not available, the driver automatically falls back to diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f29627040dfb..d8fc03c9faa2 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o +obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o @@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o obj-$(CONFIG_SPI_MXS) += spi-mxs.o obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o +obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o @@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o +obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o obj-$(CONFIG_SPI_SIRF) += spi-sirf.o obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o obj-$(CONFIG_SPI_SPRD) += spi-sprd.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index ddc712410812..fffc21cd5f79 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Atmel QSPI Controller * @@ -7,31 +8,19 @@ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> * Author: Piotr Bugalski <bugalski.piotr@gmail.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. - * * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. */ -#include <linux/kernel.h> #include <linux/clk.h> -#include <linux/module.h> -#include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> -#include <linux/of.h> - #include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/spi/spi-mem.h> /* QSPI register offsets */ @@ -47,7 +36,9 @@ #define QSPI_IAR 0x0030 /* Instruction Address Register */ #define QSPI_ICR 0x0034 /* Instruction Code Register */ +#define QSPI_WICR 0x0034 /* Write Instruction Code Register */ #define QSPI_IFR 0x0038 /* Instruction Frame Register */ +#define QSPI_RICR 0x003C /* Read Instruction Code Register */ #define QSPI_SMR 0x0040 /* Scrambling Mode Register */ #define QSPI_SKR 0x0044 /* Scrambling Key Register */ @@ -100,7 +91,7 @@ #define QSPI_SCR_DLYBS_MASK GENMASK(23, 16) #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) -/* Bitfields in QSPI_ICR (Instruction Code Register) */ +/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */ #define QSPI_ICR_INST_MASK GENMASK(7, 0) #define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK) #define QSPI_ICR_OPT_MASK GENMASK(23, 16) @@ -125,14 +116,12 @@ #define QSPI_IFR_OPTL_4BIT (2 << 8) #define QSPI_IFR_OPTL_8BIT (3 << 8) #define QSPI_IFR_ADDRL BIT(10) -#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12) -#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13) +#define QSPI_IFR_TFRTYP_MEM BIT(12) +#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13) #define QSPI_IFR_CRM BIT(14) #define QSPI_IFR_NBDUM_MASK GENMASK(20, 16) #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) +#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */ /* Bitfields in QSPI_SMR (Scrambling Mode Register) */ #define QSPI_SMR_SCREN BIT(0) @@ -148,24 +137,31 @@ #define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC) +struct atmel_qspi_caps { + bool has_qspick; + bool has_ricr; +}; struct atmel_qspi { void __iomem *regs; void __iomem *mem; - struct clk *clk; + struct clk *pclk; + struct clk *qspick; struct platform_device *pdev; + const struct atmel_qspi_caps *caps; u32 pending; + u32 mr; struct completion cmd_completion; }; -struct qspi_mode { +struct atmel_qspi_mode { u8 cmd_buswidth; u8 addr_buswidth; u8 data_buswidth; u32 config; }; -static const struct qspi_mode sama5d2_qspi_modes[] = { +static const struct atmel_qspi_mode atmel_qspi_modes[] = { { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI }, { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT }, { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT }, @@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = { { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, }; -/* Register access functions */ -static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg) -{ - return readl_relaxed(aq->regs + reg); -} - -static 
inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value) -{ - writel_relaxed(value, aq->regs + reg); -} - -static inline bool is_compatible(const struct spi_mem_op *op, - const struct qspi_mode *mode) +static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op, + const struct atmel_qspi_mode *mode) { if (op->cmd.buswidth != mode->cmd_buswidth) return false; @@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op, return true; } -static int find_mode(const struct spi_mem_op *op) +static int atmel_qspi_find_mode(const struct spi_mem_op *op) { u32 i; - for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++) - if (is_compatible(op, &sama5d2_qspi_modes[i])) + for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++) + if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i])) return i; - return -1; + return -ENOTSUPP; } static bool atmel_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { - if (find_mode(op) < 0) + if (atmel_qspi_find_mode(op) < 0) return false; /* special case not supported by hardware */ @@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem, return true; } -static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +static int atmel_qspi_set_cfg(struct atmel_qspi *aq, + const struct spi_mem_op *op, u32 *offset) { - struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); - int mode; + u32 iar, icr, ifr; u32 dummy_cycles = 0; - u32 iar, icr, ifr, sr; - int err = 0; + int mode; iar = 0; icr = QSPI_ICR_INST(op->cmd.opcode); ifr = QSPI_IFR_INSTEN; - qspi_writel(aq, QSPI_MR, QSPI_MR_SMM); - - mode = find_mode(op); + mode = atmel_qspi_find_mode(op); if (mode < 0) - return -ENOTSUPP; - - ifr |= sama5d2_qspi_modes[mode].config; + return mode; + ifr |= atmel_qspi_modes[mode].config; if (op->dummy.buswidth && op->dummy.nbytes) dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth; + /* + * The controller allows 24 and 32-bit addressing while NAND-flash + * requires 16-bit long. Handling 8-bit long addresses is done using + * the option field. For the 16-bit addresses, the workaround depends + * of the number of requested dummy bits. If there are 8 or more dummy + * cycles, the address is shifted and sent with the first dummy byte. + * Otherwise opcode is disabled and the first byte of the address + * contains the command opcode (works only if the opcode and address + * use the same buswidth). The limitation is when the 16-bit address is + * used without enough dummy cycles and the opcode is using a different + * buswidth than the address. + */ if (op->addr.buswidth) { switch (op->addr.nbytes) { case 0: @@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) } } + /* offset of the data access in the QSPI memory space */ + *offset = iar; + /* Set number of dummy cycles */ if (dummy_cycles) ifr |= QSPI_IFR_NBDUM(dummy_cycles); @@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) if (op->data.nbytes) ifr |= QSPI_IFR_DATAEN; - if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) - ifr |= QSPI_IFR_TFRTYP_TRSFR_READ; - else - ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE; + /* + * If the QSPI controller is set in regular SPI mode, set it in + * Serial Memory Mode (SMM). 
+ */ + if (aq->mr != QSPI_MR_SMM) { + writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + aq->mr = QSPI_MR_SMM; + } /* Clear pending interrupts */ - (void)qspi_readl(aq, QSPI_SR); + (void)readl_relaxed(aq->regs + QSPI_SR); + + if (aq->caps->has_ricr) { + if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN) + ifr |= QSPI_IFR_APBTFRTYP_READ; - /* Set QSPI Instruction Frame registers */ - qspi_writel(aq, QSPI_IAR, iar); - qspi_writel(aq, QSPI_ICR, icr); - qspi_writel(aq, QSPI_IFR, ifr); + /* Set QSPI Instruction Frame registers */ + writel_relaxed(iar, aq->regs + QSPI_IAR); + if (op->data.dir == SPI_MEM_DATA_IN) + writel_relaxed(icr, aq->regs + QSPI_RICR); + else + writel_relaxed(icr, aq->regs + QSPI_WICR); + writel_relaxed(ifr, aq->regs + QSPI_IFR); + } else { + if (op->data.dir == SPI_MEM_DATA_OUT) + ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; + + /* Set QSPI Instruction Frame registers */ + writel_relaxed(iar, aq->regs + QSPI_IAR); + writel_relaxed(icr, aq->regs + QSPI_ICR); + writel_relaxed(ifr, aq->regs + QSPI_IFR); + } + + return 0; +} + +static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); + u32 sr, offset; + int err; + + err = atmel_qspi_set_cfg(aq, op, &offset); + if (err) + return err; /* Skip to the final steps if there is no data */ if (op->data.nbytes) { /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ - (void)qspi_readl(aq, QSPI_IFR); + (void)readl_relaxed(aq->regs + QSPI_IFR); /* Send/Receive data */ if (op->data.dir == SPI_MEM_DATA_IN) - _memcpy_fromio(op->data.buf.in, - aq->mem + iar, op->data.nbytes); + _memcpy_fromio(op->data.buf.in, aq->mem + offset, + op->data.nbytes); else - _memcpy_toio(aq->mem + iar, - op->data.buf.out, op->data.nbytes); + _memcpy_toio(aq->mem + offset, op->data.buf.out, + op->data.nbytes); /* Release the chip-select */ - qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER); + writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR); } /* Poll INSTRuction End status */ - sr = qspi_readl(aq, QSPI_SR); + sr = readl_relaxed(aq->regs + QSPI_SR); if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) return err; /* Wait for INSTRuction End interrupt */ reinit_completion(&aq->cmd_completion); aq->pending = sr & QSPI_SR_CMD_COMPLETED; - qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED); + writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER); if (!wait_for_completion_timeout(&aq->cmd_completion, msecs_to_jiffies(1000))) err = -ETIMEDOUT; - qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED); + writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR); return err; } @@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi) if (!spi->max_speed_hz) return -EINVAL; - src_rate = clk_get_rate(aq->clk); + src_rate = clk_get_rate(aq->pclk); if (!src_rate) return -EINVAL; @@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi) scbr--; scr = QSPI_SCR_SCBR(scbr); - qspi_writel(aq, QSPI_SCR, scr); + writel_relaxed(scr, aq->regs + QSPI_SCR); return 0; } @@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi) static int atmel_qspi_init(struct atmel_qspi *aq) { /* Reset the QSPI controller */ - qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST); + writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); + + /* Set the QSPI controller by default in Serial Memory Mode */ + writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + aq->mr = QSPI_MR_SMM; /* Enable the QSPI controller */ - qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN); + 
writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); return 0; } static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) { - struct atmel_qspi *aq = (struct atmel_qspi *)dev_id; + struct atmel_qspi *aq = dev_id; u32 status, mask, pending; - status = qspi_readl(aq, QSPI_SR); - mask = qspi_readl(aq, QSPI_IMR); + status = readl_relaxed(aq->regs + QSPI_SR); + mask = readl_relaxed(aq->regs + QSPI_IMR); pending = status & mask; if (!pending) @@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev) } /* Get the peripheral clock */ - aq->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(aq->clk)) { + aq->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(aq->pclk)) + aq->pclk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(aq->pclk)) { dev_err(&pdev->dev, "missing peripheral clock\n"); - err = PTR_ERR(aq->clk); + err = PTR_ERR(aq->pclk); goto exit; } /* Enable the peripheral clock */ - err = clk_prepare_enable(aq->clk); + err = clk_prepare_enable(aq->pclk); if (err) { dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); goto exit; } + aq->caps = of_device_get_match_data(&pdev->dev); + if (!aq->caps) { + dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); + err = -EINVAL; + goto exit; + } + + if (aq->caps->has_qspick) { + /* Get the QSPI system clock */ + aq->qspick = devm_clk_get(&pdev->dev, "qspick"); + if (IS_ERR(aq->qspick)) { + dev_err(&pdev->dev, "missing system clock\n"); + err = PTR_ERR(aq->qspick); + goto disable_pclk; + } + + /* Enable the QSPI system clock */ + err = clk_prepare_enable(aq->qspick); + if (err) { + dev_err(&pdev->dev, + "failed to enable the QSPI system clock\n"); + goto disable_pclk; + } + } + /* Request the IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "missing IRQ\n"); err = irq; - goto disable_clk; + goto disable_qspick; } err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, 0, dev_name(&pdev->dev), aq); if (err) - goto disable_clk; + goto disable_qspick; err = atmel_qspi_init(aq); if (err) - goto disable_clk; + goto disable_qspick; err = spi_register_controller(ctrl); if (err) - goto disable_clk; + goto disable_qspick; return 0; -disable_clk: - clk_disable_unprepare(aq->clk); +disable_qspick: + clk_disable_unprepare(aq->qspick); +disable_pclk: + clk_disable_unprepare(aq->pclk); exit: spi_controller_put(ctrl); @@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); spi_unregister_controller(ctrl); - qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS); - clk_disable_unprepare(aq->clk); + writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR); + clk_disable_unprepare(aq->qspick); + clk_disable_unprepare(aq->pclk); return 0; } @@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev) { struct atmel_qspi *aq = dev_get_drvdata(dev); - clk_disable_unprepare(aq->clk); + clk_disable_unprepare(aq->qspick); + clk_disable_unprepare(aq->pclk); return 0; } @@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) { struct atmel_qspi *aq = dev_get_drvdata(dev); - clk_prepare_enable(aq->clk); + clk_prepare_enable(aq->pclk); + clk_prepare_enable(aq->qspick); return atmel_qspi_init(aq); } @@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, atmel_qspi_resume); +static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {}; + +static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps 
= { + .has_qspick = true, + .has_ricr = true, +}; + static const struct of_device_id atmel_qspi_dt_ids[] = { - { .compatible = "atmel,sama5d2-qspi" }, + { + .compatible = "atmel,sama5d2-qspi", + .data = &atmel_sama5d2_qspi_caps, + }, + { + .compatible = "microchip,sam9x60-qspi", + .data = &atmel_sam9x60_qspi_caps, + }, { /* sentinel */ } }; diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 3f6b657394de..847f354ebef1 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -21,18 +21,26 @@ #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/bitops.h> -#include <linux/gpio.h> #include <linux/clk.h> #include <linux/err.h> - -#include <asm/mach-ath79/ar71xx_regs.h> -#include <asm/mach-ath79/ath79_spi_platform.h> +#include <linux/platform_data/spi-ath79.h> #define DRV_NAME "ath79-spi" #define ATH79_SPI_RRW_DELAY_FACTOR 12000 #define MHZ (1000 * 1000) +#define AR71XX_SPI_REG_FS 0x00 /* Function Select */ +#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */ +#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */ +#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */ + +#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */ + +#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */ +#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */ +#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n)) + struct ath79_spi { struct spi_bitbang bitbang; u32 ioc_base; @@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active) { struct ath79_spi *sp = ath79_spidev_to_sp(spi); int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; + u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - if (is_active) { - /* set initial clock polarity */ - if (spi->mode & SPI_CPOL) - sp->ioc_base |= AR71XX_SPI_IOC_CLK; - else - sp->ioc_base &= ~AR71XX_SPI_IOC_CLK; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - if (gpio_is_valid(spi->cs_gpio)) { - /* SPI is normally active-low */ - gpio_set_value_cansleep(spi->cs_gpio, cs_high); - } else { - u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - - if (cs_high) - sp->ioc_base |= cs_bit; - else - sp->ioc_base &= ~cs_bit; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } + if (cs_high) + sp->ioc_base |= cs_bit; + else + sp->ioc_base &= ~cs_bit; + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); } static void ath79_spi_enable(struct ath79_spi *sp) @@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp) sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); + /* clear clk and mosi in the base state */ + sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK); + /* TODO: setup speed? 
*/ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); } @@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp) ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); } -static int ath79_spi_setup_cs(struct spi_device *spi) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - int status; - - status = 0; - if (gpio_is_valid(spi->cs_gpio)) { - unsigned long flags; - - flags = GPIOF_DIR_OUT; - if (spi->mode & SPI_CS_HIGH) - flags |= GPIOF_INIT_LOW; - else - flags |= GPIOF_INIT_HIGH; - - status = gpio_request_one(spi->cs_gpio, flags, - dev_name(&spi->dev)); - } else { - u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - - if (spi->mode & SPI_CS_HIGH) - sp->ioc_base &= ~cs_bit; - else - sp->ioc_base |= cs_bit; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - return status; -} - -static void ath79_spi_cleanup_cs(struct spi_device *spi) -{ - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); -} - -static int ath79_spi_setup(struct spi_device *spi) -{ - int status = 0; - - if (!spi->controller_state) { - status = ath79_spi_setup_cs(spi); - if (status) - return status; - } - - status = spi_bitbang_setup(spi); - if (status && !spi->controller_state) - ath79_spi_cleanup_cs(spi); - - return status; -} - -static void ath79_spi_cleanup(struct spi_device *spi) -{ - ath79_spi_cleanup_cs(spi); - spi_bitbang_cleanup(spi); -} - static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs, u32 word, u8 bits, unsigned flags) { @@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev) pdata = dev_get_platdata(&pdev->dev); + master->use_gpio_descriptors = true; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->setup = ath79_spi_setup; - master->cleanup = ath79_spi_cleanup; + master->setup = spi_bitbang_setup; + master->cleanup = spi_bitbang_cleanup; if (pdata) { master->bus_num = pdata->bus_num; master->num_chipselect = pdata->num_chipselect; @@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev) sp->bitbang.master = master; sp->bitbang.chipselect = ath79_spi_chipselect; sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; - sp->bitbang.setup_transfer = spi_bitbang_setup_transfer; sp->bitbang.flags = SPI_CS_HIGH; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 74fddcd3282b..4954f0ab1606 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -23,8 +23,7 @@ #include <linux/of.h> #include <linux/io.h> -#include <linux/gpio.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> @@ -312,7 +311,7 @@ struct atmel_spi { /* Controller-specific per-slave state */ struct atmel_spi_device { - unsigned int npcs_pin; + struct gpio_desc *npcs_pin; u32 csr; }; @@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as) static void cs_activate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; u32 mr; if (atmel_spi_is_v2(as)) { @@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); if (as->use_cs_gpios) - gpio_set_value(asd->npcs_pin, active); + gpiod_set_value(asd->npcs_pin, 1); } else { u32 cpol = (spi->mode & SPI_CPOL) ? 
SPI_BIT(CPOL) : 0; int i; @@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); if (as->use_cs_gpios && spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, active); + gpiod_set_value(asd->npcs_pin, 1); spi_writel(as, MR, mr); } - dev_dbg(&spi->dev, "activate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (high)" : "", - mr); + dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr); } static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; u32 mr; /* only deactivate *this* device; sometimes transfers to @@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) spi_writel(as, MR, mr); } - dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (low)" : "", - mr); + dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr); if (!as->use_cs_gpios) spi_writel(as, CR, SPI_BIT(LASTXFER)); else if (atmel_spi_is_v2(as) || spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, !active); + gpiod_set_value(asd->npcs_pin, 0); } static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock) @@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi) struct atmel_spi_device *asd; u32 csr; unsigned int bits = spi->bits_per_word; - unsigned int npcs_pin; as = spi_master_get_devdata(spi->master); @@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi) csr |= SPI_BIT(CSAAT); /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. - * - * DLYBCT would add delays between words, slowing down transfers. - * It could potentially be useful to cope with DMA bottlenecks, but - * in those cases it's probably best to just use a lower bitrate. */ csr |= SPI_BF(DLYBS, 0); - csr |= SPI_BF(DLYBCT, 0); - - /* chipselect must have been muxed as GPIO (e.g. in board setup) */ - npcs_pin = (unsigned long)spi->controller_data; - if (!as->use_cs_gpios) - npcs_pin = spi->chip_select; - else if (gpio_is_valid(spi->cs_gpio)) - npcs_pin = spi->cs_gpio; + /* DLYBCT adds delays between words. This is useful for slow devices + * that need a bit of time to setup the next transfer. + */ + csr |= SPI_BF(DLYBCT, + (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5); asd = spi->controller_state; if (!asd) { @@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi) if (!asd) return -ENOMEM; - if (as->use_cs_gpios) - gpio_direction_output(npcs_pin, - !(spi->mode & SPI_CS_HIGH)); + /* + * If use_cs_gpios is true this means that we have "cs-gpios" + * defined in the device tree node so we should have + * gotten the GPIO lines from the device tree inside the + * SPI core. Warn if this is not the case but continue since + * CS GPIOs are after all optional. 
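+		 * When present, the descriptor is kept in asd->npcs_pin and
+		 * driven from cs_activate()/cs_deactivate().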
+ */ + if (as->use_cs_gpios) { + if (!spi->cs_gpiod) { + dev_err(&spi->dev, + "host claims to use CS GPIOs but no CS found in DT by the SPI core\n"); + } + asd->npcs_pin = spi->cs_gpiod; + } - asd->npcs_pin = npcs_pin; spi->controller_state = asd; } @@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as) as->caps.has_pdc_support = version < 0x212; } -/*-------------------------------------------------------------------------*/ -static int atmel_spi_gpio_cs(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct atmel_spi *as = spi_master_get_devdata(master); - struct device_node *np = master->dev.of_node; - int i; - int ret = 0; - int nb = 0; - - if (!as->use_cs_gpios) - return 0; - - if (!np) - return 0; - - nb = of_gpio_named_count(np, "cs-gpios"); - for (i = 0; i < nb; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) - return cs_gpio; - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - return ret; - } - } - - return 0; -} - static void atmel_spi_init(struct atmel_spi *as) { spi_writel(as, CR, SPI_BIT(SWRST)); @@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev) goto out_free; /* the spi->mode bits understood by this driver: */ + master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); master->dev.of_node = pdev->dev.of_node; @@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev) atmel_get_caps(as); + /* + * If there are chip selects in the device tree, those will be + * discovered by the SPI core when registering the SPI master + * and assigned to each SPI device. + */ as->use_cs_gpios = true; if (atmel_spi_is_v2(as) && pdev->dev.of_node && @@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev) master->num_chipselect = 4; } - ret = atmel_spi_gpio_cs(pdev); - if (ret) - goto out_unmap_regs; - as->use_dma = false; as->use_pdc = false; if (as->caps.has_dma_support) { diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 671e374e1b01..f7e054848ca5 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) } bs->clk = devm_clk_get(&pdev->dev, NULL); - if ((!bs->clk) || (IS_ERR(bs->clk))) { + if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_master_put; diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index f29176000b8d..dd9a8c54a693 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi) dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); - /* NOTE we _need_ to call chipselect() early, ideally with adapter - * setup, unless the hardware defaults cooperate to avoid confusion - * between normal (active low) and inverted chipselects. 
- */ - - /* deselect chip (low or high) */ - mutex_lock(&bitbang->lock); - if (!bitbang->busy) { - bitbang->chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(cs->nsecs); - } - mutex_unlock(&bitbang->lock); - return 0; } EXPORT_SYMBOL_GPL(spi_bitbang_setup); diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 7c88f74f7f47..43d0e79842ac 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -13,7 +13,7 @@ #include <linux/clk.h> #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -128,10 +128,6 @@ struct cdns_spi { u32 is_decoded_cs; }; -struct cdns_spi_device_data { - bool gpio_requested; -}; - /* Macros for the SPI controller read/write */ static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset) { @@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi) /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @is_high: Select(0) or deselect (1) the chip select line + * @enable: Select (1) or deselect (0) the chip select line */ -static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) +static void cdns_spi_chipselect(struct spi_device *spi, bool enable) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); - if (is_high) { + if (!enable) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { @@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master) return 0; } -static int cdns_spi_setup(struct spi_device *spi) -{ - - int ret = -EINVAL; - struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi); - - /* this is a pin managed by the controller, leave it alone */ - if (spi->cs_gpio == -ENOENT) - return 0; - - /* this seems to be the first time we're here */ - if (!cdns_spi_data) { - cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL); - if (!cdns_spi_data) - return -ENOMEM; - cdns_spi_data->gpio_requested = false; - spi_set_ctldata(spi, cdns_spi_data); - } - - /* if we haven't done so, grab the gpio */ - if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) { - ret = gpio_request_one(spi->cs_gpio, - (spi->mode & SPI_CS_HIGH) ? - GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, - dev_name(&spi->dev)); - if (ret) - dev_err(&spi->dev, "can't request chipselect gpio %d\n", - spi->cs_gpio); - else - cdns_spi_data->gpio_requested = true; - } else { - if (gpio_is_valid(spi->cs_gpio)) { - int mode = ((spi->mode & SPI_CS_HIGH) ? 
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH); - - ret = gpio_direction_output(spi->cs_gpio, mode); - if (ret) - dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n", - spi->cs_gpio, ret); - } - } - - return ret; -} - -static void cdns_spi_cleanup(struct spi_device *spi) -{ - struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi); - - if (cdns_spi_data) { - if (cdns_spi_data->gpio_requested) - gpio_free(spi->cs_gpio); - kfree(cdns_spi_data); - spi_set_ctldata(spi, NULL); - } - -} - /** * cdns_spi_probe - Probe method for the SPI driver * @pdev: Pointer to the platform_device structure @@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_apb; } - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; @@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev) /* SPI controller initializations */ cdns_spi_init_hw(xspi); - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); irq = platform_get_irq(pdev, 0); if (irq <= 0) { @@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_all; } + master->use_gpio_descriptors = true; master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; master->prepare_message = cdns_prepare_message; master->transfer_one = cdns_transfer_one; master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; master->set_cs = cdns_spi_chipselect; - master->setup = cdns_spi_setup; - master->cleanup = cdns_spi_cleanup; master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA; diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index 18193df2eba8..8c03c409fc07 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c @@ -11,7 +11,7 @@ #include <linux/io.h> #include <linux/clk.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> @@ -36,25 +36,6 @@ struct spi_clps711x_data { int len; }; -static int spi_clps711x_setup(struct spi_device *spi) -{ - if (!spi->controller_state) { - int ret; - - ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio, - dev_name(&spi->master->dev)); - if (ret) - return ret; - - spi->controller_state = spi; - } - - /* We are expect that SPI-device is not selected */ - gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); - - return 0; -} - static int spi_clps711x_prepare_message(struct spi_master *master, struct spi_message *msg) { @@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev) if (!master) return -ENOMEM; + master->use_gpio_descriptors = true; master->bus_num = -1; master->mode_bits = SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8); master->dev.of_node = pdev->dev.of_node; - master->setup = spi_clps711x_setup; master->prepare_message = spi_clps711x_prepare_message; master->transfer_one = spi_clps711x_transfer_one; diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 56adec83f8fc..eb246ebcfa3a 100644 --- 
a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -15,7 +15,7 @@ #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/platform_device.h> @@ -25,7 +25,6 @@ #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/slab.h> @@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) * Board specific chip select logic decides the polarity and cs * line for the controller */ - if (spi->cs_gpio >= 0) { + if (spi->cs_gpiod) { + /* + * FIXME: is this code ever executed? This host does not + * set SPI_MASTER_GPIO_SS so this chipselect callback should + * not get called from the SPI core when we are using + * GPIOs for chip select. + */ if (value == BITBANG_CS_ACTIVE) - gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH); + gpiod_set_value(spi->cs_gpiod, 1); else - gpio_set_value(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); + gpiod_set_value(spi->cs_gpiod, 0); } else { if (value == BITBANG_CS_ACTIVE) { if (!(spi->mode & SPI_CS_WORD)) @@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi) */ static int davinci_spi_setup(struct spi_device *spi) { - int retval = 0; struct davinci_spi *dspi; - struct spi_master *master = spi->master; struct device_node *np = spi->dev.of_node; bool internal_cs = true; dspi = spi_master_get_devdata(spi->master); if (!(spi->mode & SPI_NO_CS)) { - if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) { - retval = gpio_direction_output( - spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); + if (np && spi->cs_gpiod) internal_cs = false; - } - - if (retval) { - dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", - spi->cs_gpio, retval); - return retval; - } - if (internal_cs) { + if (internal_cs) set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); - } } if (spi->mode & SPI_READY) @@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev) if (ret) goto free_master; + master->use_gpio_descriptors = true; master->dev.of_node = pdev->dev.of_node; master->bus_num = pdev->id; master->num_chipselect = pdata->num_chipselect; @@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev) if (dspi->version == SPI_VERSION_2) dspi->bitbang.flags |= SPI_READY; - if (pdev->dev.of_node) { - int i; - - for (i = 0; i < pdata->num_chipselect; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) { - ret = cs_gpio; - goto free_clk; - } - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - goto free_clk; - } - } - } - dspi->bitbang.txrx_bufs = davinci_spi_bufs; ret = davinci_spi_request_dma(dspi); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index d0dd7814e997..4bd59a93d988 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -18,7 +18,6 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_platform.h> #include <linux/acpi.h> #include <linux/property.h> @@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) dws->num_cs = num_cs; - if (pdev->dev.of_node) { - int i; - - for (i = 0; i < dws->num_cs; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if 
(cs_gpio == -EPROBE_DEFER) { - ret = cs_gpio; - goto out; - } - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - goto out; - } - } - } - init_func = device_get_match_data(&pdev->dev); if (init_func) { ret = init_func(pdev, dwsmmio); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 2e822a56576a..ac81025f86ab 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -20,7 +20,6 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/spi/spi.h> -#include <linux/gpio.h> #include "spi-dw.h" @@ -54,41 +53,41 @@ static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf, if (!buf) return 0; - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "%s registers:\n", dev_name(&dws->master->dev)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "=================================\n"); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "=================================\n"); ret = 
simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); - /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) - chip->cs_control(!enable); + chip->cs_control(enable); - if (!enable) + if (enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, /* Default SPI mode is SCPOL = 0, SCPH = 0 */ cr0 = (transfer->bits_per_word - 1) | (chip->type << SPI_FRF_OFFSET) - | (spi->mode << SPI_MODE_OFFSET) + | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) | + (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET)) | (chip->tmode << SPI_TMOD_OFFSET); /* @@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi) { struct dw_spi_chip *chip_info = NULL; struct chip_data *chip; - int ret; /* Only alloc on first setup */ chip = spi_get_ctldata(spi); @@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi) chip->tmode = SPI_TMOD_TR; - if (gpio_is_valid(spi->cs_gpio)) { - ret = gpio_direction_output(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); - if (ret) - return ret; - } - return 0; } @@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) goto err_free_master; } + master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->bus_num = dws->bus_num; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 5e10dc5c93a5..53335ccc98f6 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -67,7 +67,7 @@ #define SPI_SR 0x2c #define SPI_SR_EOQF 0x10000000 #define SPI_SR_TCFQF 0x80000000 -#define SPI_SR_CLEAR 0xdaad0000 +#define SPI_SR_CLEAR 0x9aaf0000 #define SPI_RSER_TFFFE BIT(25) #define SPI_RSER_TFFFD BIT(24) @@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) { u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); + if (spi_controller_is_slave(dspi->master)) + return data; + if (dspi->len > 0) cmd |= SPI_PUSHR_CMD_CONT; return cmd << 16 | data; @@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) dma_async_issue_pending(dma->chan_rx); dma_async_issue_pending(dma->chan_tx); + if (spi_controller_is_slave(dspi->master)) { + wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete); + return 0; + } + time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, DMA_COMPLETION_TIMEOUT); if (time_left == 0) { @@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi) ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) - | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) - | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) - | SPI_CTAR_PCSSCK(pcssck) - | SPI_CTAR_CSSCK(cssck) - | SPI_CTAR_PASC(pasc) - | SPI_CTAR_ASC(asc) - | SPI_CTAR_PBR(pbr) - | SPI_CTAR_BR(br); + | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0); + + if (!spi_controller_is_slave(dspi->master)) { + chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode & + SPI_LSB_FIRST ? 
1 : 0) + | SPI_CTAR_PCSSCK(pcssck) + | SPI_CTAR_CSSCK(cssck) + | SPI_CTAR_PASC(pasc) + | SPI_CTAR_ASC(asc) + | SPI_CTAR_PBR(pbr) + | SPI_CTAR_BR(br); + } spi_set_ctldata(spi, chip); @@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { static void dspi_init(struct fsl_dspi *dspi) { - regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS | - (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0)); + unsigned int mcr = SPI_MCR_PCSIS | + (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0); + + if (!spi_controller_is_slave(dspi->master)) + mcr |= SPI_MCR_MASTER; + + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); if (dspi->devtype_data->xspi_mode) regmap_write(dspi->regmap, SPI_CTARE(0), @@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev) } master->bus_num = bus_num; + if (of_property_read_bool(np, "spi-slave")) + master->slave = true; + dspi->devtype_data = of_device_get_match_data(&pdev->dev); if (!dspi->devtype_data) { dev_err(&pdev->dev, "can't get devtype_data\n"); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 08dcc3c22e88..391863914043 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -48,10 +48,13 @@ #define CR_RTF BIT(8) #define CR_RST BIT(1) #define CR_MEN BIT(0) +#define SR_MBF BIT(24) #define SR_TCF BIT(10) +#define SR_FCF BIT(9) #define SR_RDF BIT(1) #define SR_TDF BIT(0) #define IER_TCIE BIT(10) +#define IER_FCIE BIT(9) #define IER_RDIE BIT(1) #define IER_TDIE BIT(0) #define CFGR1_PCSCFG BIT(27) @@ -59,6 +62,7 @@ #define CFGR1_PCSPOL BIT(8) #define CFGR1_NOSTALL BIT(3) #define CFGR1_MASTER BIT(0) +#define FSR_RXCOUNT (BIT(16)|BIT(17)|BIT(18)) #define RSR_RXEMPTY BIT(1) #define TCR_CPOL BIT(31) #define TCR_CPHA BIT(30) @@ -161,28 +165,10 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller) return 0; } -static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi) -{ - u32 txcnt; - unsigned long orig_jiffies = jiffies; - - do { - txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; - - if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { - dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n"); - return -ETIMEDOUT; - } - cond_resched(); - - } while (txcnt); - - return 0; -} - static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) { u8 txfifo_cnt; + u32 temp; txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; @@ -193,9 +179,15 @@ static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) txfifo_cnt++; } - if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize)) - writel(0, fsl_lpspi->base + IMX7ULP_TDR); - else + if (txfifo_cnt < fsl_lpspi->txfifosize) { + if (!fsl_lpspi->is_slave) { + temp = readl(fsl_lpspi->base + IMX7ULP_TCR); + temp &= ~TCR_CONTC; + writel(temp, fsl_lpspi->base + IMX7ULP_TCR); + } + + fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); + } else fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE); } @@ -276,10 +268,6 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) u32 temp; int ret; - temp = CR_RST; - writel(temp, fsl_lpspi->base + IMX7ULP_CR); - writel(0, fsl_lpspi->base + IMX7ULP_CR); - if (!fsl_lpspi->is_slave) { ret = fsl_lpspi_set_bitrate(fsl_lpspi); if (ret) @@ -370,6 +358,24 @@ static int fsl_lpspi_wait_for_completion(struct spi_controller *controller) return 0; } +static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi) +{ + u32 temp; + + /* Disable all interrupt */ + fsl_lpspi_intctrl(fsl_lpspi, 0); + + /* W1C for all 
flags in SR */ + temp = 0x3F << 8; + writel(temp, fsl_lpspi->base + IMX7ULP_SR); + + /* Clear FIFO and disable module */ + temp = CR_RRF | CR_RTF; + writel(temp, fsl_lpspi->base + IMX7ULP_CR); + + return 0; +} + static int fsl_lpspi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *t) @@ -391,11 +397,7 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller, if (ret) return ret; - ret = fsl_lpspi_txfifo_empty(fsl_lpspi); - if (ret) - return ret; - - fsl_lpspi_read_rx_fifo(fsl_lpspi); + fsl_lpspi_reset(fsl_lpspi); return 0; } @@ -408,7 +410,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller, struct spi_device *spi = msg->spi; struct spi_transfer *xfer; bool is_first_xfer = true; - u32 temp; int ret = 0; msg->status = 0; @@ -428,13 +429,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller, } complete: - if (!fsl_lpspi->is_slave) { - /* de-assert SS, then finalize current message */ - temp = readl(fsl_lpspi->base + IMX7ULP_TCR); - temp &= ~TCR_CONTC; - writel(temp, fsl_lpspi->base + IMX7ULP_TCR); - } - msg->status = ret; spi_finalize_current_message(controller); @@ -443,20 +437,30 @@ complete: static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id) { + u32 temp_SR, temp_IER; struct fsl_lpspi_data *fsl_lpspi = dev_id; - u32 temp; + temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER); fsl_lpspi_intctrl(fsl_lpspi, 0); - temp = readl(fsl_lpspi->base + IMX7ULP_SR); + temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR); fsl_lpspi_read_rx_fifo(fsl_lpspi); - if (temp & SR_TDF) { + if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) { fsl_lpspi_write_tx_fifo(fsl_lpspi); + return IRQ_HANDLED; + } - if (!fsl_lpspi->remain) - complete(&fsl_lpspi->xfer_done); + if (temp_SR & SR_MBF || + readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_RXCOUNT) { + writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); + fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); + return IRQ_HANDLED; + } + if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) { + writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); + complete(&fsl_lpspi->xfer_done); return IRQ_HANDLED; } diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c new file mode 100644 index 000000000000..6a713f78a62e --- /dev/null +++ b/drivers/spi/spi-fsl-qspi.c @@ -0,0 +1,966 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Freescale QuadSPI driver. + * + * Copyright (C) 2013 Freescale Semiconductor, Inc. + * Copyright (C) 2018 Bootlin + * Copyright (C) 2018 exceet electronics GmbH + * Copyright (C) 2018 Kontron Electronics GmbH + * + * Transition to SPI MEM interface: + * Authors: + * Boris Brezillon <bbrezillon@kernel.org> + * Frieder Schrempf <frieder.schrempf@kontron.de> + * Yogesh Gaur <yogeshnarayan.gaur@nxp.com> + * Suresh Gupta <suresh.gupta@nxp.com> + * + * Based on the original fsl-quadspi.c spi-nor driver: + * Author: Freescale Semiconductor, Inc. 
+ * + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_qos.h> +#include <linux/sizes.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (15). + */ +#define SEQID_LUT 15 + +/* Registers used by the driver */ +#define QUADSPI_MCR 0x00 +#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16) +#define QUADSPI_MCR_MDIS_MASK BIT(14) +#define QUADSPI_MCR_CLR_TXF_MASK BIT(11) +#define QUADSPI_MCR_CLR_RXF_MASK BIT(10) +#define QUADSPI_MCR_DDR_EN_MASK BIT(7) +#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2) +#define QUADSPI_MCR_SWRSTHD_MASK BIT(1) +#define QUADSPI_MCR_SWRSTSD_MASK BIT(0) + +#define QUADSPI_IPCR 0x08 +#define QUADSPI_IPCR_SEQID(x) ((x) << 24) + +#define QUADSPI_BUF3CR 0x1c +#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31) +#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8) +#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8) + +#define QUADSPI_BFGENCR 0x20 +#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12) + +#define QUADSPI_BUF0IND 0x30 +#define QUADSPI_BUF1IND 0x34 +#define QUADSPI_BUF2IND 0x38 +#define QUADSPI_SFAR 0x100 + +#define QUADSPI_SMPR 0x108 +#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16) +#define QUADSPI_SMPR_FSDLY_MASK BIT(6) +#define QUADSPI_SMPR_FSPHS_MASK BIT(5) +#define QUADSPI_SMPR_HSENA_MASK BIT(0) + +#define QUADSPI_RBCT 0x110 +#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0) +#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8) + +#define QUADSPI_TBDR 0x154 + +#define QUADSPI_SR 0x15c +#define QUADSPI_SR_IP_ACC_MASK BIT(1) +#define QUADSPI_SR_AHB_ACC_MASK BIT(2) + +#define QUADSPI_FR 0x160 +#define QUADSPI_FR_TFF_MASK BIT(0) + +#define QUADSPI_SPTRCLR 0x16c +#define QUADSPI_SPTRCLR_IPPTRC BIT(8) +#define QUADSPI_SPTRCLR_BFPTRC BIT(0) + +#define QUADSPI_SFA1AD 0x180 +#define QUADSPI_SFA2AD 0x184 +#define QUADSPI_SFB1AD 0x188 +#define QUADSPI_SFB2AD 0x18c +#define QUADSPI_RBDR(x) (0x200 + ((x) * 4)) + +#define QUADSPI_LUTKEY 0x300 +#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0 + +#define QUADSPI_LCKCR 0x304 +#define QUADSPI_LCKER_LOCK BIT(0) +#define QUADSPI_LCKER_UNLOCK BIT(1) + +#define QUADSPI_RSER 0x164 +#define QUADSPI_RSER_TFIE BIT(0) + +#define QUADSPI_LUT_BASE 0x310 +#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +#define QUADSPI_LUT_REG(idx) \ + (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4) + +/* Instruction set for the LUT register */ +#define LUT_STOP 0 +#define LUT_CMD 1 +#define LUT_ADDR 2 +#define LUT_DUMMY 3 +#define LUT_MODE 4 +#define LUT_MODE2 5 +#define LUT_MODE4 6 +#define LUT_FSL_READ 7 +#define LUT_FSL_WRITE 8 +#define LUT_JMP_ON_CS 9 +#define LUT_ADDR_DDR 10 +#define LUT_MODE_DDR 11 +#define LUT_MODE2_DDR 12 +#define LUT_MODE4_DDR 13 +#define LUT_FSL_READ_DDR 14 +#define LUT_FSL_WRITE_DDR 15 +#define LUT_DATA_LEARN 16 + +/* + * The PAD definitions for LUT register. + * + * The pad stands for the number of IO lines [0:3]. + * For example, the quad read needs four IO lines, + * so you should use LUT_PAD(4). 
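+ * In other words, LUT_PAD(1) is 0, LUT_PAD(2) is 1 and LUT_PAD(4) is 2,
+ * i.e. fls(x) - 1.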
+ */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16)) + +/* Controller needs driver to swap endianness */ +#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0) + +/* Controller needs 4x internal clock */ +#define QUADSPI_QUIRK_4X_INT_CLK BIT(1) + +/* + * TKT253890, the controller needs the driver to fill the txfifo with + * 16 bytes at least to trigger a data transfer, even though the extra + * data won't be transferred. + */ +#define QUADSPI_QUIRK_TKT253890 BIT(2) + +/* TKT245618, the controller cannot wake up from wait mode */ +#define QUADSPI_QUIRK_TKT245618 BIT(3) + +/* + * Controller adds QSPI_AMBA_BASE (base address of the mapped memory) + * internally. No need to add it when setting SFXXAD and SFAR registers + */ +#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4) + +struct fsl_qspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; + bool little_endian; +}; + +static const struct fsl_qspi_devtype_data vybrid_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_SWAP_ENDIAN, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx6sx_data = { + .rxfifo = SZ_128, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx7d_data = { + .rxfifo = SZ_512, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx6ul_data = { + .rxfifo = SZ_128, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data ls1021a_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = 0, + .little_endian = false, +}; + +static const struct fsl_qspi_devtype_data ls2080a_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL, + .little_endian = true, +}; + +struct fsl_qspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + struct clk *clk, *clk_en; + struct device *dev; + struct completion c; + const struct fsl_qspi_devtype_data *devtype_data; + struct mutex lock; + struct pm_qos_request pm_qos_req; + int selected; +}; + +static inline int needs_swap_endian(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN; +} + +static inline int needs_4x_clock(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK; +} + +static inline int needs_fill_txfifo(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890; +} + +static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618; +} + +static inline int needs_amba_base_offset(struct fsl_qspi *q) +{ + return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL); +} + +/* + * An IC bug makes it necessary to rearrange the 32-bit data. 
+ * Later chips, such as IMX6SLX, have fixed this bug. + */ +static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) +{ + return needs_swap_endian(q) ? __swab32(a) : a; +} + +/* + * R/W functions for big- or little-endian registers: + * The QSPI controller's endianness is independent of + * the CPU core's endianness. So far, although the CPU + * core is little-endian the QSPI controller can use + * big-endian or little-endian. + */ +static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) +{ + if (q->devtype_data->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) +{ + if (q->devtype_data->little_endian) + return ioread32(addr); + + return ioread32be(addr); +} + +static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) +{ + struct fsl_qspi *q = dev_id; + u32 reg; + + /* clear interrupt */ + reg = qspi_readl(q, q->iobase + QUADSPI_FR); + qspi_writel(q, reg, q->iobase + QUADSPI_FR); + + if (reg & QUADSPI_FR_TFF_MASK) + complete(&q->c); + + dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg); + return IRQ_HANDLED; +} + +static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width) +{ + switch (width) { + case 1: + case 2: + case 4: + return 0; + } + + return -ENOTSUPP; +} + +static bool fsl_qspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + int ret; + + ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of instructions needed for the op, needs + * to fit into a single LUT entry. + */ + if (op->addr.nbytes + + (op->dummy.nbytes ? 1:0) + + (op->data.nbytes ? 1:0) > 6) + return false; + + /* Max 64 dummy clock cycles supported */ + if (op->dummy.nbytes && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + /* Max data length, check controller limits and alignment */ + if (op->data.dir == SPI_MEM_DATA_IN && + (op->data.nbytes > q->devtype_data->ahb_buf_size || + (op->data.nbytes > q->devtype_data->rxfifo - 4 && + !IS_ALIGNED(op->data.nbytes, 8)))) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > q->devtype_data->txfifo) + return false; + + return true; +} + +static void fsl_qspi_prepare_lut(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; + + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + + /* + * For some unknown reason, using LUT_ADDR doesn't work in some + * cases (at least with only one byte long addresses), so + * let's use LUT_MODE to write the address bytes one by one + */ + for (i = 0; i < op->addr.nbytes; i++) { + u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1)); + + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE, + LUT_PAD(op->addr.buswidth), + addrbyte); + lutidx++; + } + + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + LUT_PAD(op->dummy.buswidth), + op->dummy.nbytes * 8 / + op->dummy.buswidth); + lutidx++; + } + + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? 
+ LUT_FSL_READ : LUT_FSL_WRITE, + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* unlock LUT */ + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); + + /* fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i)); + + /* lock LUT */ + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); +} + +static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) +{ + int ret; + + ret = clk_prepare_enable(q->clk_en); + if (ret) + return ret; + + ret = clk_prepare_enable(q->clk); + if (ret) { + clk_disable_unprepare(q->clk_en); + return ret; + } + + if (needs_wakeup_wait_mode(q)) + pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); + + return 0; +} + +static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) +{ + if (needs_wakeup_wait_mode(q)) + pm_qos_remove_request(&q->pm_qos_req); + + clk_disable_unprepare(q->clk); + clk_disable_unprepare(q->clk_en); +} + +/* + * If we have changed the content of the flash by writing or erasing, or if we + * read from flash with a different offset into the page buffer, we need to + * invalidate the AHB buffer. If we do not do so, we may read out the wrong + * data. The spec tells us reset the AHB domain and Serial Flash domain at + * the same time. + */ +static void fsl_qspi_invalidate(struct fsl_qspi *q) +{ + u32 reg; + + reg = qspi_readl(q, q->iobase + QUADSPI_MCR); + reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); + + /* + * The minimum delay : 1 AHB + 2 SFCK clocks. + * Delay 1 us is enough. 
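+	 * (At the default 66 MHz rate programmed in fsl_qspi_default_setup(),
+	 * one SFCK period is roughly 15 ns.)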
+ */ + udelay(1); + + reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); +} + +static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi) +{ + unsigned long rate = spi->max_speed_hz; + int ret; + + if (q->selected == spi->chip_select) + return; + + if (needs_4x_clock(q)) + rate *= 4; + + fsl_qspi_clk_disable_unprep(q); + + ret = clk_set_rate(q->clk, rate); + if (ret) + return; + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) + return; + + q->selected = spi->chip_select; + + fsl_qspi_invalidate(q); +} + +static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op) +{ + memcpy_fromio(op->data.buf.in, + q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size, + op->data.nbytes); +} + +static void fsl_qspi_fill_txfifo(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int i; + u32 val; + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { + memcpy(&val, op->data.buf.out + i, 4); + val = fsl_qspi_endian_xchg(q, val); + qspi_writel(q, val, base + QUADSPI_TBDR); + } + + if (i < op->data.nbytes) { + memcpy(&val, op->data.buf.out + i, op->data.nbytes - i); + val = fsl_qspi_endian_xchg(q, val); + qspi_writel(q, val, base + QUADSPI_TBDR); + } + + if (needs_fill_txfifo(q)) { + for (i = op->data.nbytes; i < 16; i += 4) + qspi_writel(q, 0, base + QUADSPI_TBDR); + } +} + +static void fsl_qspi_read_rxfifo(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int i; + u8 *buf = op->data.buf.in; + u32 val; + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { + val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); + val = fsl_qspi_endian_xchg(q, val); + memcpy(buf + i, &val, 4); + } + + if (i < op->data.nbytes) { + val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); + val = fsl_qspi_endian_xchg(q, val); + memcpy(buf + i, &val, op->data.nbytes - i); + } +} + +static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int err = 0; + + init_completion(&q->c); + + /* + * Always start the sequence at the same index since we update + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. + */ + qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT), + base + QUADSPI_IPCR); + + /* Wait for the interrupt. 
*/ + if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + fsl_qspi_read_rxfifo(q, op); + + return err; +} + +static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base, + u32 mask, u32 delay_us, u32 timeout_us) +{ + u32 reg; + + if (!q->devtype_data->little_endian) + mask = (u32)cpu_to_be32(mask); + + return readl_poll_timeout(base, reg, !(reg & mask), delay_us, + timeout_us); +} + +static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + void __iomem *base = q->iobase; + u32 addr_offset = 0; + int err = 0; + + mutex_lock(&q->lock); + + /* wait for the controller being ready */ + fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK | + QUADSPI_SR_AHB_ACC_MASK), 10, 1000); + + fsl_qspi_select_mem(q, mem->spi); + + if (needs_amba_base_offset(q)) + addr_offset = q->memmap_phy; + + qspi_writel(q, + q->selected * q->devtype_data->ahb_buf_size + addr_offset, + base + QUADSPI_SFAR); + + qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) | + QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK, + base + QUADSPI_MCR); + + qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC, + base + QUADSPI_SPTRCLR); + + fsl_qspi_prepare_lut(q, op); + + /* + * If we have large chunks of data, we read them through the AHB bus + * by accessing the mapped memory. In all other cases we use + * IP commands to access the flash. + */ + if (op->data.nbytes > (q->devtype_data->rxfifo - 4) && + op->data.dir == SPI_MEM_DATA_IN) { + fsl_qspi_read_ahb(q, op); + } else { + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | + QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT); + + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + fsl_qspi_fill_txfifo(q, op); + + err = fsl_qspi_do_op(q, op); + } + + /* Invalidate the data in the AHB buffer. */ + fsl_qspi_invalidate(q); + + mutex_unlock(&q->lock); + + return err; +} + +static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > q->devtype_data->txfifo) + op->data.nbytes = q->devtype_data->txfifo; + } else { + if (op->data.nbytes > q->devtype_data->ahb_buf_size) + op->data.nbytes = q->devtype_data->ahb_buf_size; + else if (op->data.nbytes > (q->devtype_data->rxfifo - 4)) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8); + } + + return 0; +} + +static int fsl_qspi_default_setup(struct fsl_qspi *q) +{ + void __iomem *base = q->iobase; + u32 reg, addr_offset = 0; + int ret; + + /* disable and unprepare clock to avoid glitch pass to controller */ + fsl_qspi_clk_disable_unprep(q); + + /* the default frequency, we will change it later if necessary. 
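The adjust_op_size() callback above clamps a transfer to what the controller can move in one go: writes are limited by the TX FIFO, reads by the AHB buffer, and mid-sized reads are rounded down to an 8-byte multiple so they satisfy the RX FIFO rules. A standalone sketch with assumed limits (128-byte RX FIFO, 512-byte TX FIFO, 1 KiB AHB buffer):

#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Assumed example limits; the real values come from devtype_data. */
enum { RXFIFO = 128, TXFIFO = 512, AHB_BUF = 1024 };

static unsigned int clamp_read(unsigned int nbytes)
{
	if (nbytes > AHB_BUF)
		return AHB_BUF;
	if (nbytes > RXFIFO - 4)
		return ALIGN_DOWN(nbytes, 8);
	return nbytes;
}

static unsigned int clamp_write(unsigned int nbytes)
{
	return nbytes > TXFIFO ? TXFIFO : nbytes;
}

int main(void)
{
	printf("read  2000 -> %u\n", clamp_read(2000));	/* 1024: AHB buffer cap   */
	printf("read   300 -> %u\n", clamp_read(300));	/*  296: rounded to 8     */
	printf("read   100 -> %u\n", clamp_read(100));	/*  100: fits the RX FIFO */
	printf("write  600 -> %u\n", clamp_write(600));	/*  512: TX FIFO cap      */
	return 0;
}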
*/ + ret = clk_set_rate(q->clk, 66000000); + if (ret) + return ret; + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) + return ret; + + /* Reset the module */ + qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, + base + QUADSPI_MCR); + udelay(1); + + /* Disable the module */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, + base + QUADSPI_MCR); + + reg = qspi_readl(q, base + QUADSPI_SMPR); + qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK + | QUADSPI_SMPR_FSPHS_MASK + | QUADSPI_SMPR_HSENA_MASK + | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); + + /* We only use the buffer3 for AHB read */ + qspi_writel(q, 0, base + QUADSPI_BUF0IND); + qspi_writel(q, 0, base + QUADSPI_BUF1IND); + qspi_writel(q, 0, base + QUADSPI_BUF2IND); + + qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT), + q->iobase + QUADSPI_BFGENCR); + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT); + qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | + QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8), + base + QUADSPI_BUF3CR); + + if (needs_amba_base_offset(q)) + addr_offset = q->memmap_phy; + + /* + * In HW there can be a maximum of four chips on two buses with + * two chip selects on each bus. We use four chip selects in SW + * to differentiate between the four chips. + * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD, + * SFB2AD accordingly. + */ + qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset, + base + QUADSPI_SFA1AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset, + base + QUADSPI_SFA2AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset, + base + QUADSPI_SFB1AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset, + base + QUADSPI_SFB2AD); + + q->selected = -1; + + /* Enable the module */ + qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, + base + QUADSPI_MCR); + + /* clear all interrupt status */ + qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); + + /* enable the interrupt */ + qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); + + return 0; +} + +static const char *fsl_qspi_get_name(struct spi_mem *mem) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + struct device *dev = &mem->spi->dev; + const char *name; + + /* + * In order to keep mtdparts compatible with the old MTD driver at + * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the + * platform_device of the controller. 
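The four SFxyAD writes above carve the memory-mapped space into one ahb_buf_size slice per chip select; each register holds the top address of its slice. A sketch of the resulting layout, assuming a 1 KiB AHB buffer and a 0x60000000 base for the needs_amba_base_offset() case (both values are illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned int ahb_buf_size = 1024;		/* assumed example value */
	const unsigned int base = 0x60000000;		/* assumed memmap_phy    */
	const char *reg[] = { "SFA1AD", "SFA2AD", "SFB1AD", "SFB2AD" };

	for (int cs = 0; cs < 4; cs++)
		printf("%s = 0x%08x (top of CS%d window)\n",
		       reg[cs], base + ahb_buf_size * (cs + 1), cs);
	return 0;
}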
+ */ + if (of_get_available_child_count(q->dev->of_node) == 1) + return dev_name(q->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(q->dev), + mem->spi->chip_select); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { + .adjust_op_size = fsl_qspi_adjust_op_size, + .supports_op = fsl_qspi_supports_op, + .exec_op = fsl_qspi_exec_op, + .get_name = fsl_qspi_get_name, +}; + +static int fsl_qspi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct fsl_qspi *q; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*q)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + q = spi_controller_get_devdata(ctlr); + q->dev = dev; + q->devtype_data = of_device_get_match_data(dev); + if (!q->devtype_data) { + ret = -ENODEV; + goto err_put_ctrl; + } + + platform_set_drvdata(pdev, q); + + /* find the resources */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); + q->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(q->iobase)) { + ret = PTR_ERR(q->iobase); + goto err_put_ctrl; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "QuadSPI-memory"); + q->ahb_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(q->ahb_addr)) { + ret = PTR_ERR(q->ahb_addr); + goto err_put_ctrl; + } + + q->memmap_phy = res->start; + + /* find the clocks */ + q->clk_en = devm_clk_get(dev, "qspi_en"); + if (IS_ERR(q->clk_en)) { + ret = PTR_ERR(q->clk_en); + goto err_put_ctrl; + } + + q->clk = devm_clk_get(dev, "qspi"); + if (IS_ERR(q->clk)) { + ret = PTR_ERR(q->clk); + goto err_put_ctrl; + } + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + goto err_put_ctrl; + } + + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "failed to get the irq: %d\n", ret); + goto err_disable_clk; + } + + ret = devm_request_irq(dev, ret, + fsl_qspi_irq_handler, 0, pdev->name, q); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto err_disable_clk; + } + + mutex_init(&q->lock); + + ctlr->bus_num = -1; + ctlr->num_chipselect = 4; + ctlr->mem_ops = &fsl_qspi_mem_ops; + + fsl_qspi_default_setup(q); + + ctlr->dev.of_node = np; + + ret = spi_register_controller(ctlr); + if (ret) + goto err_destroy_mutex; + + return 0; + +err_destroy_mutex: + mutex_destroy(&q->lock); + +err_disable_clk: + fsl_qspi_clk_disable_unprep(q); + +err_put_ctrl: + spi_controller_put(ctlr); + + dev_err(dev, "Freescale QuadSPI probe failed\n"); + return ret; +} + +static int fsl_qspi_remove(struct platform_device *pdev) +{ + struct fsl_qspi *q = platform_get_drvdata(pdev); + + /* disable the hardware */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); + qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); + + fsl_qspi_clk_disable_unprep(q); + + mutex_destroy(&q->lock); + + return 0; +} + +static int fsl_qspi_suspend(struct device *dev) +{ + return 0; +} + +static int fsl_qspi_resume(struct device *dev) +{ + struct fsl_qspi *q = dev_get_drvdata(dev); + + fsl_qspi_default_setup(q); + + return 0; +} + +static const struct of_device_id fsl_qspi_dt_ids[] = { + { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, }, + { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, }, + 
{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, }, + { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, }, + { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, }, + { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); + +static const struct dev_pm_ops fsl_qspi_pm_ops = { + .suspend = fsl_qspi_suspend, + .resume = fsl_qspi_resume, +}; + +static struct platform_driver fsl_qspi_driver = { + .driver = { + .name = "fsl-quadspi", + .of_match_table = fsl_qspi_dt_ids, + .pm = &fsl_qspi_pm_ops, + }, + .probe = fsl_qspi_probe, + .remove = fsl_qspi_remove, +}; +module_platform_driver(fsl_qspi_driver); + +MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); +MODULE_AUTHOR("Freescale Semiconductor Inc."); +MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>"); +MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>"); +MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>"); +MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index fdb7cb88fb56..5f0b0d5bfef4 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -89,9 +89,6 @@ struct spi_geni_master { int irq; }; -static void handle_fifo_timeout(struct spi_master *spi, - struct spi_message *msg); - static int get_spi_clk_cfg(unsigned int speed_hz, struct spi_geni_master *mas, unsigned int *clk_idx, @@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz, return ret; } +static void handle_fifo_timeout(struct spi_master *spi, + struct spi_message *msg) +{ + struct spi_geni_master *mas = spi_master_get_devdata(spi); + unsigned long time_left, flags; + struct geni_se *se = &mas->se; + + spin_lock_irqsave(&mas->lock, flags); + reinit_completion(&mas->xfer_done); + mas->cur_mcmd = CMD_CANCEL; + geni_se_cancel_m_cmd(se); + writel(0, se->base + SE_GENI_TX_WATERMARK_REG); + spin_unlock_irqrestore(&mas->lock, flags); + time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); + if (time_left) + return; + + spin_lock_irqsave(&mas->lock, flags); + reinit_completion(&mas->xfer_done); + geni_se_abort_m_cmd(se); + spin_unlock_irqrestore(&mas->lock, flags); + time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); + if (!time_left) + dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); +} + static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) { struct spi_geni_master *mas = spi_master_get_devdata(slv->master); @@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi, struct geni_se *se = &mas->se; geni_se_select_mode(se, GENI_SE_FIFO); - reinit_completion(&mas->xfer_done); ret = setup_fifo_params(spi_msg->spi, spi); if (ret) dev_err(mas->dev, "Couldn't select mode %d\n", ret); @@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG); } -static void handle_fifo_timeout(struct spi_master *spi, - struct spi_message *msg) -{ - struct spi_geni_master *mas = spi_master_get_devdata(spi); - unsigned long time_left, flags; - struct geni_se *se = &mas->se; - - spin_lock_irqsave(&mas->lock, flags); - reinit_completion(&mas->xfer_done); - mas->cur_mcmd = CMD_CANCEL; - geni_se_cancel_m_cmd(se); - writel(0, se->base + SE_GENI_TX_WATERMARK_REG); - spin_unlock_irqrestore(&mas->lock, flags); - time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); - if (time_left) - return; - - spin_lock_irqsave(&mas->lock, 
flags); - reinit_completion(&mas->xfer_done); - geni_se_abort_m_cmd(se); - spin_unlock_irqrestore(&mas->lock, flags); - time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); - if (!time_left) - dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); -} - static int spi_geni_transfer_one(struct spi_master *spi, struct spi_device *slv, struct spi_transfer *xfer) diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index a4aee26028cd..53b35c56a557 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -428,7 +428,8 @@ static int spi_gpio_probe(struct platform_device *pdev) return status; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL; + master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL | + SPI_CS_HIGH; master->flags = master_flags; master->bus_num = pdev->id; /* The master needs to think there is a chipselect even if not connected */ @@ -455,7 +456,6 @@ static int spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; } spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; - spi_gpio->bitbang.flags = SPI_CS_HIGH; status = spi_bitbang_start(&spi_gpio->bitbang); if (status) diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 5217a5628be2..a4d8d19ecff9 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -537,7 +537,6 @@ EXPORT_SYMBOL_GPL(spi_mem_dirmap_create); /** * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor * @desc: the direct mapping descriptor to destroy - * @info: direct mapping information * * This function destroys a direct mapping descriptor previously created by * spi_mem_dirmap_create(). @@ -548,9 +547,80 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc) if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy) ctlr->mem_ops->dirmap_destroy(desc); + + kfree(desc); } EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy); +static void devm_spi_mem_dirmap_release(struct device *dev, void *res) +{ + struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res; + + spi_mem_dirmap_destroy(desc); +} + +/** + * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach + * it to a device + * @dev: device the dirmap desc will be attached to + * @mem: SPI mem device this direct mapping should be created for + * @info: direct mapping information + * + * devm_ variant of the spi_mem_dirmap_create() function. See + * spi_mem_dirmap_create() for more details. + * + * Return: a valid pointer in case of success, and ERR_PTR() otherwise. 
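As the kernel-doc notes, this is the managed variant: the descriptor is torn down automatically when the owning device goes away. A hedged usage sketch from a hypothetical spi-mem driver probe path; the op template, opcode and length are made up for illustration and are not taken from any particular flash:

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

/* Sketch only: assumes a struct spi_mem *mem already handed over by the core. */
static int example_setup_dirmap(struct spi_mem *mem)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_DUMMY(1, 1),
				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
		.offset = 0,
		.length = SZ_16M,	/* illustrative flash size */
	};
	struct spi_mem_dirmap_desc *desc;

	desc = devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* desc is released with the device; no explicit destroy call needed. */
	return 0;
}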
+ */ +struct spi_mem_dirmap_desc * +devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, + const struct spi_mem_dirmap_info *info) +{ + struct spi_mem_dirmap_desc **ptr, *desc; + + ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + desc = spi_mem_dirmap_create(mem, info); + if (IS_ERR(desc)) { + devres_free(ptr); + } else { + *ptr = desc; + devres_add(dev, ptr); + } + + return desc; +} +EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create); + +static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data) +{ + struct spi_mem_dirmap_desc **ptr = res; + + if (WARN_ON(!ptr || !*ptr)) + return 0; + + return *ptr == data; +} + +/** + * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached + * to a device + * @dev: device the dirmap desc is attached to + * @desc: the direct mapping descriptor to destroy + * + * devm_ variant of the spi_mem_dirmap_destroy() function. See + * spi_mem_dirmap_destroy() for more details. + */ +void devm_spi_mem_dirmap_destroy(struct device *dev, + struct spi_mem_dirmap_desc *desc) +{ + devres_release(dev, devm_spi_mem_dirmap_release, + devm_spi_mem_dirmap_match, desc); +} +EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy); + /** * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping * @desc: direct mapping descriptor diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 6ac95a2a21ce..7bf53cfc25d6 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -39,6 +39,7 @@ #include <linux/stmp_device.h> #include <linux/spi/spi.h> #include <linux/spi/mxs-spi.h> +#include <trace/events/spi.h> #define DRIVER_NAME "mxs-spi" @@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master, list_for_each_entry(t, &m->transfers, transfer_list) { + trace_spi_transfer_start(m, t); + status = mxs_spi_setup_transfer(m->spi, t); if (status) break; @@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master, flag); } + trace_spi_transfer_stop(m, t); + if (status) { stmp_reset_block(ssp->base); break; diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index e1dca79b9090..734a2b956959 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -465,7 +465,8 @@ out_master_put: static int npcm_pspi_remove(struct platform_device *pdev) { - struct npcm_pspi *priv = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct npcm_pspi *priv = spi_master_get_devdata(master); npcm_pspi_reset_hw(priv); clk_disable_unprepare(priv->clk); diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c new file mode 100644 index 000000000000..8894f98cc99c --- /dev/null +++ b/drivers/spi/spi-nxp-fspi.c @@ -0,0 +1,1106 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * NXP FlexSPI(FSPI) controller driver. + * + * Copyright 2019 NXP. + * + * FlexSPI is a flexsible SPI host controller which supports two SPI + * channels and up to 4 external devices. Each channel supports + * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional + * data lines). + * + * FlexSPI controller is driven by the LUT(Look-up Table) registers + * LUT registers are a look-up-table for sequences of instructions. + * A valid sequence consists of four LUT registers. + * Maximum 32 LUT sequences can be programmed simultaneously. + * + * LUTs are being created at run-time based on the commands passed + * from the spi-mem framework, thus using single LUT index. 
+ * + * Software triggered Flash read/write access by IP Bus. + * + * Memory mapped read access by AHB Bus. + * + * Based on SPI MEM interface and spi-fsl-qspi.c driver. + * + * Author: + * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com> + * Boris Brezillon <bbrezillon@kernel.org> + * Frieder Schrempf <frieder.schrempf@kontron.de> + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_qos.h> +#include <linux/sizes.h> + +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (31). + */ +#define SEQID_LUT 31 + +/* Registers used by the driver */ +#define FSPI_MCR0 0x00 +#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) +#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16) +#define FSPI_MCR0_LEARN_EN BIT(15) +#define FSPI_MCR0_SCRFRUN_EN BIT(14) +#define FSPI_MCR0_OCTCOMB_EN BIT(13) +#define FSPI_MCR0_DOZE_EN BIT(12) +#define FSPI_MCR0_HSEN BIT(11) +#define FSPI_MCR0_SERCLKDIV BIT(8) +#define FSPI_MCR0_ATDF_EN BIT(7) +#define FSPI_MCR0_ARDF_EN BIT(6) +#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4) +#define FSPI_MCR0_END_CFG(x) ((x) << 2) +#define FSPI_MCR0_MDIS BIT(1) +#define FSPI_MCR0_SWRST BIT(0) + +#define FSPI_MCR1 0x04 +#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16) +#define FSPI_MCR1_AHB_TIMEOUT(x) (x) + +#define FSPI_MCR2 0x08 +#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24) +#define FSPI_MCR2_SAMEDEVICEEN BIT(15) +#define FSPI_MCR2_CLRLRPHS BIT(14) +#define FSPI_MCR2_ABRDATSZ BIT(8) +#define FSPI_MCR2_ABRLEARN BIT(7) +#define FSPI_MCR2_ABR_READ BIT(6) +#define FSPI_MCR2_ABRWRITE BIT(5) +#define FSPI_MCR2_ABRDUMMY BIT(4) +#define FSPI_MCR2_ABR_MODE BIT(3) +#define FSPI_MCR2_ABRCADDR BIT(2) +#define FSPI_MCR2_ABRRADDR BIT(1) +#define FSPI_MCR2_ABR_CMD BIT(0) + +#define FSPI_AHBCR 0x0c +#define FSPI_AHBCR_RDADDROPT BIT(6) +#define FSPI_AHBCR_PREF_EN BIT(5) +#define FSPI_AHBCR_BUFF_EN BIT(4) +#define FSPI_AHBCR_CACH_EN BIT(3) +#define FSPI_AHBCR_CLRTXBUF BIT(2) +#define FSPI_AHBCR_CLRRXBUF BIT(1) +#define FSPI_AHBCR_PAR_EN BIT(0) + +#define FSPI_INTEN 0x10 +#define FSPI_INTEN_SCLKSBWR BIT(9) +#define FSPI_INTEN_SCLKSBRD BIT(8) +#define FSPI_INTEN_DATALRNFL BIT(7) +#define FSPI_INTEN_IPTXWE BIT(6) +#define FSPI_INTEN_IPRXWA BIT(5) +#define FSPI_INTEN_AHBCMDERR BIT(4) +#define FSPI_INTEN_IPCMDERR BIT(3) +#define FSPI_INTEN_AHBCMDGE BIT(2) +#define FSPI_INTEN_IPCMDGE BIT(1) +#define FSPI_INTEN_IPCMDDONE BIT(0) + +#define FSPI_INTR 0x14 +#define FSPI_INTR_SCLKSBWR BIT(9) +#define FSPI_INTR_SCLKSBRD BIT(8) +#define FSPI_INTR_DATALRNFL BIT(7) +#define FSPI_INTR_IPTXWE BIT(6) +#define FSPI_INTR_IPRXWA BIT(5) +#define FSPI_INTR_AHBCMDERR BIT(4) +#define FSPI_INTR_IPCMDERR BIT(3) +#define FSPI_INTR_AHBCMDGE BIT(2) +#define FSPI_INTR_IPCMDGE BIT(1) +#define FSPI_INTR_IPCMDDONE BIT(0) + +#define FSPI_LUTKEY 0x18 +#define FSPI_LUTKEY_VALUE 0x5AF05AF0 + +#define FSPI_LCKCR 0x1C + +#define FSPI_LCKER_LOCK 0x1 +#define FSPI_LCKER_UNLOCK 0x2 + +#define FSPI_BUFXCR_INVALID_MSTRID 0xE +#define FSPI_AHBRX_BUF0CR0 0x20 +#define FSPI_AHBRX_BUF1CR0 
0x24 +#define FSPI_AHBRX_BUF2CR0 0x28 +#define FSPI_AHBRX_BUF3CR0 0x2C +#define FSPI_AHBRX_BUF4CR0 0x30 +#define FSPI_AHBRX_BUF5CR0 0x34 +#define FSPI_AHBRX_BUF6CR0 0x38 +#define FSPI_AHBRX_BUF7CR0 0x3C +#define FSPI_AHBRXBUF0CR7_PREF BIT(31) + +#define FSPI_AHBRX_BUF0CR1 0x40 +#define FSPI_AHBRX_BUF1CR1 0x44 +#define FSPI_AHBRX_BUF2CR1 0x48 +#define FSPI_AHBRX_BUF3CR1 0x4C +#define FSPI_AHBRX_BUF4CR1 0x50 +#define FSPI_AHBRX_BUF5CR1 0x54 +#define FSPI_AHBRX_BUF6CR1 0x58 +#define FSPI_AHBRX_BUF7CR1 0x5C + +#define FSPI_FLSHA1CR0 0x60 +#define FSPI_FLSHA2CR0 0x64 +#define FSPI_FLSHB1CR0 0x68 +#define FSPI_FLSHB2CR0 0x6C +#define FSPI_FLSHXCR0_SZ_KB 10 +#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB) + +#define FSPI_FLSHA1CR1 0x70 +#define FSPI_FLSHA2CR1 0x74 +#define FSPI_FLSHB1CR1 0x78 +#define FSPI_FLSHB2CR1 0x7C +#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16) +#define FSPI_FLSHXCR1_CAS(x) ((x) << 11) +#define FSPI_FLSHXCR1_WA BIT(10) +#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5) +#define FSPI_FLSHXCR1_TCSS(x) (x) + +#define FSPI_FLSHA1CR2 0x80 +#define FSPI_FLSHA2CR2 0x84 +#define FSPI_FLSHB1CR2 0x88 +#define FSPI_FLSHB2CR2 0x8C +#define FSPI_FLSHXCR2_CLRINSP BIT(24) +#define FSPI_FLSHXCR2_AWRWAIT BIT(16) +#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13 +#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8 +#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5 +#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0 + +#define FSPI_IPCR0 0xA0 + +#define FSPI_IPCR1 0xA4 +#define FSPI_IPCR1_IPAREN BIT(31) +#define FSPI_IPCR1_SEQNUM_SHIFT 24 +#define FSPI_IPCR1_SEQID_SHIFT 16 +#define FSPI_IPCR1_IDATSZ(x) (x) + +#define FSPI_IPCMD 0xB0 +#define FSPI_IPCMD_TRG BIT(0) + +#define FSPI_DLPR 0xB4 + +#define FSPI_IPRXFCR 0xB8 +#define FSPI_IPRXFCR_CLR BIT(0) +#define FSPI_IPRXFCR_DMA_EN BIT(1) +#define FSPI_IPRXFCR_WMRK(x) ((x) << 2) + +#define FSPI_IPTXFCR 0xBC +#define FSPI_IPTXFCR_CLR BIT(0) +#define FSPI_IPTXFCR_DMA_EN BIT(1) +#define FSPI_IPTXFCR_WMRK(x) ((x) << 2) + +#define FSPI_DLLACR 0xC0 +#define FSPI_DLLACR_OVRDEN BIT(8) + +#define FSPI_DLLBCR 0xC4 +#define FSPI_DLLBCR_OVRDEN BIT(8) + +#define FSPI_STS0 0xE0 +#define FSPI_STS0_DLPHB(x) ((x) << 8) +#define FSPI_STS0_DLPHA(x) ((x) << 4) +#define FSPI_STS0_CMD_SRC(x) ((x) << 2) +#define FSPI_STS0_ARB_IDLE BIT(1) +#define FSPI_STS0_SEQ_IDLE BIT(0) + +#define FSPI_STS1 0xE4 +#define FSPI_STS1_IP_ERRCD(x) ((x) << 24) +#define FSPI_STS1_IP_ERRID(x) ((x) << 16) +#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8) +#define FSPI_STS1_AHB_ERRID(x) (x) + +#define FSPI_AHBSPNST 0xEC +#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16) +#define FSPI_AHBSPNST_BUFID(x) ((x) << 1) +#define FSPI_AHBSPNST_ACTIVE BIT(0) + +#define FSPI_IPRXFSTS 0xF0 +#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16) +#define FSPI_IPRXFSTS_FILL(x) (x) + +#define FSPI_IPTXFSTS 0xF4 +#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16) +#define FSPI_IPTXFSTS_FILL(x) (x) + +#define FSPI_RFDR 0x100 +#define FSPI_TFDR 0x180 + +#define FSPI_LUT_BASE 0x200 +#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +#define FSPI_LUT_REG(idx) \ + (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4) + +/* register map end */ + +/* Instruction set for the LUT register. 
*/ +#define LUT_STOP 0x00 +#define LUT_CMD 0x01 +#define LUT_ADDR 0x02 +#define LUT_CADDR_SDR 0x03 +#define LUT_MODE 0x04 +#define LUT_MODE2 0x05 +#define LUT_MODE4 0x06 +#define LUT_MODE8 0x07 +#define LUT_NXP_WRITE 0x08 +#define LUT_NXP_READ 0x09 +#define LUT_LEARN_SDR 0x0A +#define LUT_DATSZ_SDR 0x0B +#define LUT_DUMMY 0x0C +#define LUT_DUMMY_RWDS_SDR 0x0D +#define LUT_JMP_ON_CS 0x1F +#define LUT_CMD_DDR 0x21 +#define LUT_ADDR_DDR 0x22 +#define LUT_CADDR_DDR 0x23 +#define LUT_MODE_DDR 0x24 +#define LUT_MODE2_DDR 0x25 +#define LUT_MODE4_DDR 0x26 +#define LUT_MODE8_DDR 0x27 +#define LUT_WRITE_DDR 0x28 +#define LUT_READ_DDR 0x29 +#define LUT_LEARN_DDR 0x2A +#define LUT_DATSZ_DDR 0x2B +#define LUT_DUMMY_DDR 0x2C +#define LUT_DUMMY_RWDS_DDR 0x2D + +/* + * Calculate number of required PAD bits for LUT register. + * + * The pad stands for the number of IO lines [0:7]. + * For example, the octal read needs eight IO lines, + * so you should use LUT_PAD(8). This macro + * returns 3 i.e. use eight (2^3) IP lines for read. + */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define PAD_SHIFT 8 +#define INSTR_SHIFT 10 +#define OPRND_SHIFT 16 + +/* Macros for constructing the LUT register. */ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \ + (opr)) << (((idx) % 2) * OPRND_SHIFT)) + +#define POLL_TOUT 5000 +#define NXP_FSPI_MAX_CHIPSELECT 4 + +struct nxp_fspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; + bool little_endian; +}; + +static const struct nxp_fspi_devtype_data lx2160a_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .little_endian = true, /* little-endian */ +}; + +struct nxp_fspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + u32 memmap_phy_size; + struct clk *clk, *clk_en; + struct device *dev; + struct completion c; + const struct nxp_fspi_devtype_data *devtype_data; + struct mutex lock; + struct pm_qos_request pm_qos_req; + int selected; +}; + +/* + * R/W functions for big- or little-endian registers: + * The FSPI controller's endianness is independent of + * the CPU core's endianness. So far, although the CPU + * core is little-endian the FSPI controller can use + * big-endian or little-endian. 
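To make the packing above concrete, here is a standalone computation of the LUT words for a plain 1-1-4 fast-read style sequence; the opcode and cycle counts are illustrative, not taken from a specific flash datasheet:

#include <stdio.h>

#define PAD_SHIFT	8
#define INSTR_SHIFT	10
#define OPRND_SHIFT	16

#define LUT_DEF(idx, ins, pad, opr) \
	((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
	  (opr)) << (((idx) % 2) * OPRND_SHIFT))

enum { LUT_CMD = 0x01, LUT_ADDR = 0x02, LUT_NXP_READ = 0x09, LUT_DUMMY = 0x0C };

/* fls(x) - 1 equivalent for the buswidths used here (1, 2, 4, 8). */
static unsigned int lut_pad(unsigned int buswidth)
{
	unsigned int pad = 0;

	while (buswidth >>= 1)
		pad++;
	return pad;
}

int main(void)
{
	unsigned int lut[4] = { 0 };

	lut[0] |= LUT_DEF(0, LUT_CMD, lut_pad(1), 0x6b);	/* 1-bit opcode    */
	lut[0] |= LUT_DEF(1, LUT_ADDR, lut_pad(1), 3 * 8);	/* 24 address bits */
	lut[1] |= LUT_DEF(2, LUT_DUMMY, lut_pad(4), 8);		/* 8 dummy cycles  */
	lut[1] |= LUT_DEF(3, LUT_NXP_READ, lut_pad(4), 0);	/* data on 4 lines */

	printf("lut[0]=0x%08x lut[1]=0x%08x\n", lut[0], lut[1]);
	/* expected: lut[0]=0x0818046b lut[1]=0x26003208 */
	return 0;
}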
+ */ +static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr) +{ + if (f->devtype_data->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr) +{ + if (f->devtype_data->little_endian) + return ioread32(addr); + else + return ioread32be(addr); +} + +static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id) +{ + struct nxp_fspi *f = dev_id; + u32 reg; + + /* clear interrupt */ + reg = fspi_readl(f, f->iobase + FSPI_INTR); + fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR); + + if (reg & FSPI_INTR_IPCMDDONE) + complete(&f->c); + + return IRQ_HANDLED; +} + +static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width) +{ + switch (width) { + case 1: + case 2: + case 4: + case 8: + return 0; + } + + return -ENOTSUPP; +} + +static bool nxp_fspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + int ret; + + ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of address bytes should be equal to or less than 4 bytes. + */ + if (op->addr.nbytes > 4) + return false; + + /* + * If requested address value is greater than controller assigned + * memory mapped space, return error as it didn't fit in the range + * of assigned address space. + */ + if (op->addr.val >= f->memmap_phy_size) + return false; + + /* Max 64 dummy clock cycles supported */ + if (op->dummy.buswidth && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + /* Max data length, check controller limits and alignment */ + if (op->data.dir == SPI_MEM_DATA_IN && + (op->data.nbytes > f->devtype_data->ahb_buf_size || + (op->data.nbytes > f->devtype_data->rxfifo - 4 && + !IS_ALIGNED(op->data.nbytes, 8)))) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > f->devtype_data->txfifo) + return false; + + return true; +} + +/* Instead of busy looping invoke readl_poll_timeout functionality. */ +static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base, + u32 mask, u32 delay_us, + u32 timeout_us, bool c) +{ + u32 reg; + + if (!f->devtype_data->little_endian) + mask = (u32)cpu_to_be32(mask); + + if (c) + return readl_poll_timeout(base, reg, (reg & mask), + delay_us, timeout_us); + else + return readl_poll_timeout(base, reg, !(reg & mask), + delay_us, timeout_us); +} + +/* + * If the slave device content being changed by Write/Erase, need to + * invalidate the AHB buffer. This can be achieved by doing the reset + * of controller after setting MCR0[SWRESET] bit. 
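The dummy-phase bound in supports_op() above is expressed in bytes and bus width; it works out to at most 64 clock cycles on the bus. A small standalone check mirroring that arithmetic (the 64-cycle limit is the one quoted in the code, the sample operations are made up):

#include <stdbool.h>
#include <stdio.h>

/* cycles = dummy_bytes * 8 / buswidth; the controller allows at most 64. */
static bool dummy_ok(unsigned int nbytes, unsigned int buswidth)
{
	if (!buswidth)
		return true;	/* no dummy phase at all */
	return nbytes * 8 / buswidth <= 64;
}

int main(void)
{
	printf("8 bytes on 1 line  : %s\n", dummy_ok(8, 1) ? "ok" : "rejected");	/* 64 cycles  */
	printf("16 bytes on 1 line : %s\n", dummy_ok(16, 1) ? "ok" : "rejected");	/* 128 cycles */
	printf("16 bytes on 8 lines: %s\n", dummy_ok(16, 8) ? "ok" : "rejected");	/* 16 cycles  */
	return 0;
}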
+ */ +static inline void nxp_fspi_invalid(struct nxp_fspi *f) +{ + u32 reg; + int ret; + + reg = fspi_readl(f, f->iobase + FSPI_MCR0); + fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0); + + /* w1c register, wait unit clear */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0, + FSPI_MCR0_SWRST, 0, POLL_TOUT, false); + WARN_ON(ret); +} + +static void nxp_fspi_prepare_lut(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; + + /* cmd */ + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + + /* addr bytes */ + if (op->addr.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR, + LUT_PAD(op->addr.buswidth), + op->addr.nbytes * 8); + lutidx++; + } + + /* dummy bytes, if needed */ + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + /* + * Due to FlexSPI controller limitation number of PAD for dummy + * buswidth needs to be programmed as equal to data buswidth. + */ + LUT_PAD(op->data.buswidth), + op->dummy.nbytes * 8 / + op->dummy.buswidth); + lutidx++; + } + + /* read/write data bytes */ + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? + LUT_NXP_READ : LUT_NXP_WRITE, + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + /* stop condition. */ + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* unlock LUT */ + fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY); + fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR); + + /* fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); + + dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n", + op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]); + + /* lock LUT */ + fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY); + fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR); +} + +static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) +{ + int ret; + + ret = clk_prepare_enable(f->clk_en); + if (ret) + return ret; + + ret = clk_prepare_enable(f->clk); + if (ret) { + clk_disable_unprepare(f->clk_en); + return ret; + } + + return 0; +} + +static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) +{ + clk_disable_unprepare(f->clk); + clk_disable_unprepare(f->clk_en); +} + +/* + * In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0 + * register and start base address of the slave device. + * + * (Higher address) + * -------- <-- FLSHB2CR0 + * | B2 | + * | | + * B2 start address --> -------- <-- FLSHB1CR0 + * | B1 | + * | | + * B1 start address --> -------- <-- FLSHA2CR0 + * | A2 | + * | | + * A2 start address --> -------- <-- FLSHA1CR0 + * | A1 | + * | | + * A1 start address --> -------- (Lower address) + * + * + * Start base address defines the starting address range for given CS and + * FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS. + * + * But, different targets are having different combinations of number of CS, + * some targets only have single CS or two CS covering controller's full + * memory mapped space area. + * Thus, implementation is being done as independent of the size and number + * of the connected slave device. + * Assign controller memory mapped space size as the size to the connected + * slave device. + * Mark FLSHxxCR0 as zero initially and then assign value only to the selected + * chip-select Flash configuration register. + * + * For e.g. 
to access CS2 (B1), FLSHB1CR0 register would be equal to the + * memory mapped size of the controller. + * Value for rest of the CS FLSHxxCR0 register would be zero. + * + */ +static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi) +{ + unsigned long rate = spi->max_speed_hz; + int ret; + uint64_t size_kb; + + /* + * Return, if previously selected slave device is same as current + * requested slave device. + */ + if (f->selected == spi->chip_select) + return; + + /* Reset FLSHxxCR0 registers */ + fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0); + + /* Assign controller memory mapped space as size, KBytes, of flash. */ + size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size); + + fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 + + 4 * spi->chip_select); + + dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select); + + nxp_fspi_clk_disable_unprep(f); + + ret = clk_set_rate(f->clk, rate); + if (ret) + return; + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return; + + f->selected = spi->chip_select; +} + +static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) +{ + u32 len = op->data.nbytes; + + /* Read out the data directly from the AHB buffer. */ + memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len); +} + +static void nxp_fspi_fill_txfifo(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int i, ret; + u8 *buf = (u8 *) op->data.buf.out; + + /* clear the TX FIFO. */ + fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR); + + /* + * Default value of water mark level is 8 bytes, hence in single + * write request controller can write max 8 bytes of data. + */ + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) { + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + + fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR); + fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4); + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); + } + + if (i < op->data.nbytes) { + u32 data = 0; + int j; + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + + for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) { + memcpy(&data, buf + i + j, 4); + fspi_writel(f, data, base + FSPI_TFDR + j); + } + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); + } +} + +static void nxp_fspi_read_rxfifo(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int i, ret; + int len = op->data.nbytes; + u8 *buf = (u8 *) op->data.buf.in; + + /* + * Default value of water mark level is 8 bytes, hence in single + * read request controller can read max 8 bytes of data. 
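The select path above gives the active chip select the whole mapped window, expressed in KiB, and zeroes the other three FLSHxxCR0 registers. A sketch of the resulting register picture for a hypothetical 256 MiB window with CS1 selected (window size and chip select are assumptions for illustration):

#include <stdio.h>

#define FSPI_FLSHXCR0_SZ_KB	10
#define FSPI_FLSHXCR0_SZ(x)	((x) >> FSPI_FLSHXCR0_SZ_KB)

int main(void)
{
	const unsigned int memmap_phy_size = 256u << 20;	/* assumed 256 MiB window */
	const int selected_cs = 1;				/* A2 in the layout above */
	const char *reg[] = { "FLSHA1CR0", "FLSHA2CR0", "FLSHB1CR0", "FLSHB2CR0" };

	for (int cs = 0; cs < 4; cs++)
		printf("%s = %u KiB\n", reg[cs],
		       cs == selected_cs ? FSPI_FLSHXCR0_SZ(memmap_phy_size) : 0);
	return 0;
}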
+ */ + for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) { + /* Wait for RXFIFO available */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPRXWA, 0, + POLL_TOUT, true); + WARN_ON(ret); + + *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR); + *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4); + /* move the FIFO pointer */ + fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR); + } + + if (i < len) { + u32 tmp; + int size, j; + + buf = op->data.buf.in + i; + /* Wait for RXFIFO available */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPRXWA, 0, + POLL_TOUT, true); + WARN_ON(ret); + + len = op->data.nbytes - i; + for (j = 0; j < op->data.nbytes - i; j += 4) { + tmp = fspi_readl(f, base + FSPI_RFDR + j); + size = min(len, 4); + memcpy(buf + j, &tmp, size); + len -= size; + } + } + + /* invalid the RXFIFO */ + fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR); + /* move the FIFO pointer */ + fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR); +} + +static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int seqnum = 0; + int err = 0; + u32 reg; + + reg = fspi_readl(f, base + FSPI_IPRXFCR); + /* invalid RXFIFO first */ + reg &= ~FSPI_IPRXFCR_DMA_EN; + reg = reg | FSPI_IPRXFCR_CLR; + fspi_writel(f, reg, base + FSPI_IPRXFCR); + + init_completion(&f->c); + + fspi_writel(f, op->addr.val, base + FSPI_IPCR0); + /* + * Always start the sequence at the same index since we update + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. + */ + fspi_writel(f, op->data.nbytes | + (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | + (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), + base + FSPI_IPCR1); + + /* Trigger the LUT now. */ + fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD); + + /* Wait for the interrupt. */ + if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + /* Invoke IP data read, if request is of data read. */ + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + nxp_fspi_read_rxfifo(f, op); + + return err; +} + +static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + int err = 0; + + mutex_lock(&f->lock); + + /* Wait for controller being ready. */ + err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0, + FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true); + WARN_ON(err); + + nxp_fspi_select_mem(f, mem->spi); + + nxp_fspi_prepare_lut(f, op); + /* + * If we have large chunks of data, we read them through the AHB bus + * by accessing the mapped memory. In all other cases we use + * IP commands to access the flash. + */ + if (op->data.nbytes > (f->devtype_data->rxfifo - 4) && + op->data.dir == SPI_MEM_DATA_IN) { + nxp_fspi_read_ahb(f, op); + } else { + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + nxp_fspi_fill_txfifo(f, op); + + err = nxp_fspi_do_op(f, op); + } + + /* Invalidate the data in the AHB buffer. 
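The IP command above is kicked off by three writes: the flash address into IPCR0, a combined length/sequence word into IPCR1, and the trigger bit in IPCMD. A standalone rendering of the IPCR1 value for an assumed 256-byte transfer using LUT sequence 31 (the length is an example, not a controller requirement):

#include <stdio.h>

#define SEQID_LUT		31
#define FSPI_IPCR1_SEQNUM_SHIFT	24
#define FSPI_IPCR1_SEQID_SHIFT	16

int main(void)
{
	unsigned int nbytes = 256;	/* assumed transfer length   */
	unsigned int seqnum = 0;	/* single-sequence command   */
	unsigned int ipcr1 = nbytes |
			     (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
			     (seqnum << FSPI_IPCR1_SEQNUM_SHIFT);

	printf("IPCR1 = 0x%08x\n", ipcr1);	/* expected: 0x001f0100 */
	return 0;
}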
*/ + nxp_fspi_invalid(f); + + mutex_unlock(&f->lock); + + return err; +} + +static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > f->devtype_data->txfifo) + op->data.nbytes = f->devtype_data->txfifo; + } else { + if (op->data.nbytes > f->devtype_data->ahb_buf_size) + op->data.nbytes = f->devtype_data->ahb_buf_size; + else if (op->data.nbytes > (f->devtype_data->rxfifo - 4)) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8); + } + + return 0; +} + +static int nxp_fspi_default_setup(struct nxp_fspi *f) +{ + void __iomem *base = f->iobase; + int ret, i; + u32 reg; + + /* disable and unprepare clock to avoid glitch pass to controller */ + nxp_fspi_clk_disable_unprep(f); + + /* the default frequency, we will change it later if necessary. */ + ret = clk_set_rate(f->clk, 20000000); + if (ret) + return ret; + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return ret; + + /* Reset the module */ + /* w1c register, wait unit clear */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0, + FSPI_MCR0_SWRST, 0, POLL_TOUT, false); + WARN_ON(ret); + + /* Disable the module */ + fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0); + + /* Reset the DLL register to default value */ + fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR); + fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR); + + /* enable module */ + fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF), + base + FSPI_MCR0); + + /* + * Disable same device enable bit and configure all slave devices + * independently. + */ + reg = fspi_readl(f, f->iobase + FSPI_MCR2); + reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN); + fspi_writel(f, reg, base + FSPI_MCR2); + + /* AHB configuration for access buffer 0~7. */ + for (i = 0; i < 7; i++) + fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i); + + /* + * Set ADATSZ with the maximum AHB buffer size to improve the read + * performance. + */ + fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 | + FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0); + + /* prefetch and no start address alignment limitation */ + fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT, + base + FSPI_AHBCR); + + /* AHB Read - Set lut sequence ID for all CS. */ + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); + + f->selected = -1; + + /* enable the interrupt */ + fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN); + + return 0; +} + +static const char *nxp_fspi_get_name(struct spi_mem *mem) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + struct device *dev = &mem->spi->dev; + const char *name; + + // Set custom name derived from the platform_device of the controller. 
+ if (of_get_available_child_count(f->dev->of_node) == 1) + return dev_name(f->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(f->dev), + mem->spi->chip_select); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops nxp_fspi_mem_ops = { + .adjust_op_size = nxp_fspi_adjust_op_size, + .supports_op = nxp_fspi_supports_op, + .exec_op = nxp_fspi_exec_op, + .get_name = nxp_fspi_get_name, +}; + +static int nxp_fspi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct nxp_fspi *f; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*f)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | + SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL; + + f = spi_controller_get_devdata(ctlr); + f->dev = dev; + f->devtype_data = of_device_get_match_data(dev); + if (!f->devtype_data) { + ret = -ENODEV; + goto err_put_ctrl; + } + + platform_set_drvdata(pdev, f); + + /* find the resources - configuration register address space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base"); + f->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(f->iobase)) { + ret = PTR_ERR(f->iobase); + goto err_put_ctrl; + } + + /* find the resources - controller memory mapped space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); + f->ahb_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(f->ahb_addr)) { + ret = PTR_ERR(f->ahb_addr); + goto err_put_ctrl; + } + + /* assign memory mapped starting address and mapped size. */ + f->memmap_phy = res->start; + f->memmap_phy_size = resource_size(res); + + /* find the clocks */ + f->clk_en = devm_clk_get(dev, "fspi_en"); + if (IS_ERR(f->clk_en)) { + ret = PTR_ERR(f->clk_en); + goto err_put_ctrl; + } + + f->clk = devm_clk_get(dev, "fspi"); + if (IS_ERR(f->clk)) { + ret = PTR_ERR(f->clk); + goto err_put_ctrl; + } + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + goto err_put_ctrl; + } + + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "failed to get the irq: %d\n", ret); + goto err_disable_clk; + } + + ret = devm_request_irq(dev, ret, + nxp_fspi_irq_handler, 0, pdev->name, f); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto err_disable_clk; + } + + mutex_init(&f->lock); + + ctlr->bus_num = -1; + ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; + ctlr->mem_ops = &nxp_fspi_mem_ops; + + nxp_fspi_default_setup(f); + + ctlr->dev.of_node = np; + + ret = spi_register_controller(ctlr); + if (ret) + goto err_destroy_mutex; + + return 0; + +err_destroy_mutex: + mutex_destroy(&f->lock); + +err_disable_clk: + nxp_fspi_clk_disable_unprep(f); + +err_put_ctrl: + spi_controller_put(ctlr); + + dev_err(dev, "NXP FSPI probe failed\n"); + return ret; +} + +static int nxp_fspi_remove(struct platform_device *pdev) +{ + struct nxp_fspi *f = platform_get_drvdata(pdev); + + /* disable the hardware */ + fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + + nxp_fspi_clk_disable_unprep(f); + + mutex_destroy(&f->lock); + + return 0; +} + +static int nxp_fspi_suspend(struct device *dev) +{ + return 0; +} + +static int nxp_fspi_resume(struct device *dev) +{ + struct nxp_fspi *f = dev_get_drvdata(dev); + + nxp_fspi_default_setup(f); + + return 0; +} + 
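The naming rule above keeps single-flash setups on the bare controller name and appends the chip select otherwise. A quick illustration of the resulting names, with a purely hypothetical controller device name:

#include <stdio.h>

int main(void)
{
	const char *ctrl = "20c0000.spi";	/* assumed dev_name() of the controller */

	/* One flash child: the memory inherits the controller name as-is. */
	printf("single flash: %s\n", ctrl);

	/* Several flashes: one name per chip select, as devm_kasprintf() builds it. */
	for (int cs = 0; cs < 2; cs++)
		printf("multi flash : %s-%d\n", ctrl, cs);
	return 0;
}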
+static const struct of_device_id nxp_fspi_dt_ids[] = { + { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); + +static const struct dev_pm_ops nxp_fspi_pm_ops = { + .suspend = nxp_fspi_suspend, + .resume = nxp_fspi_resume, +}; + +static struct platform_driver nxp_fspi_driver = { + .driver = { + .name = "nxp-fspi", + .of_match_table = nxp_fspi_dt_ids, + .pm = &nxp_fspi_pm_ops, + }, + .probe = nxp_fspi_probe, + .remove = nxp_fspi_remove, +}; +module_platform_driver(nxp_fspi_driver); + +MODULE_DESCRIPTION("NXP FSPI Controller Driver"); +MODULE_AUTHOR("NXP Semiconductor"); +MODULE_AUTHOR("Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>"); +MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>"); +MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 2fd8881fcd65..8be304379628 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -623,8 +623,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; cfg.src_addr_width = width; cfg.dst_addr_width = width; - cfg.src_maxburst = es; - cfg.dst_maxburst = es; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; rx = xfer->rx_buf; tx = xfer->tx_buf; diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 0c793e31d60f..26684178786f 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -253,6 +253,7 @@ #define STATE_RUNNING ((void *) 1) #define STATE_DONE ((void *) 2) #define STATE_ERROR ((void *) -1) +#define STATE_TIMEOUT ((void *) -2) /* * SSP State - Whether Enabled or Disabled @@ -1484,6 +1485,30 @@ err_config_dma: writew(irqflags, SSP_IMSC(pl022->virtbase)); } +static void print_current_status(struct pl022 *pl022) +{ + u32 read_cr0; + u16 read_cr1, read_dmacr, read_sr; + + if (pl022->vendor->extended_cr) + read_cr0 = readl(SSP_CR0(pl022->virtbase)); + else + read_cr0 = readw(SSP_CR0(pl022->virtbase)); + read_cr1 = readw(SSP_CR1(pl022->virtbase)); + read_dmacr = readw(SSP_DMACR(pl022->virtbase)); + read_sr = readw(SSP_SR(pl022->virtbase)); + + dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0); + dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1); + dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr); + dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr); + dev_warn(&pl022->adev->dev, + "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n", + pl022->exp_fifo_level, + pl022->vendor->fifodepth); + +} + static void do_polling_transfer(struct pl022 *pl022) { struct spi_message *message = NULL; @@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022) if (time_after(time, timeout)) { dev_warn(&pl022->adev->dev, "%s: timeout!\n", __func__); - message->state = STATE_ERROR; + message->state = STATE_TIMEOUT; + print_current_status(pl022); goto out; } cpu_relax(); @@ -1553,6 +1579,8 @@ out: /* Handle end of message */ if (message->state == STATE_DONE) message->status = 0; + else if (message->state == STATE_TIMEOUT) + message->status = -EAGAIN; else message->status = -EIO; diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 2fa7f4b43492..15592598273e 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -23,7 +23,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, bool error) { - struct spi_message *msg = 
drv_data->master->cur_msg; + struct spi_message *msg = drv_data->controller->cur_msg; /* * It is possible that one CPU is handling ROR interrupt and other @@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, msg->status = -EIO; } - spi_finalize_current_transfer(drv_data->master); + spi_finalize_current_transfer(drv_data->controller); } } @@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, struct spi_transfer *xfer) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); enum dma_slave_buswidth width; struct dma_slave_config cfg; struct dma_chan *chan; @@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, cfg.dst_maxburst = chip->dma_burst_size; sgt = &xfer->tx_sg; - chan = drv_data->master->dma_tx; + chan = drv_data->controller->dma_tx; } else { cfg.src_addr = drv_data->ssdr_physical; cfg.src_addr_width = width; cfg.src_maxburst = chip->dma_burst_size; sgt = &xfer->rx_sg; - chan = drv_data->master->dma_rx; + chan = drv_data->controller->dma_rx; } ret = dmaengine_slave_config(chan, &cfg); @@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) if (status & SSSR_ROR) { dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); - dmaengine_terminate_async(drv_data->master->dma_rx); - dmaengine_terminate_async(drv_data->master->dma_tx); + dmaengine_terminate_async(drv_data->controller->dma_rx); + dmaengine_terminate_async(drv_data->controller->dma_tx); pxa2xx_spi_dma_transfer_complete(drv_data, true); return IRQ_HANDLED; @@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, return 0; err_rx: - dmaengine_terminate_async(drv_data->master->dma_tx); + dmaengine_terminate_async(drv_data->controller->dma_tx); err_tx: return err; } void pxa2xx_spi_dma_start(struct driver_data *drv_data) { - dma_async_issue_pending(drv_data->master->dma_rx); - dma_async_issue_pending(drv_data->master->dma_tx); + dma_async_issue_pending(drv_data->controller->dma_rx); + dma_async_issue_pending(drv_data->controller->dma_tx); atomic_set(&drv_data->dma_running, 1); } @@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data) void pxa2xx_spi_dma_stop(struct driver_data *drv_data) { atomic_set(&drv_data->dma_running, 0); - dmaengine_terminate_sync(drv_data->master->dma_rx); - dmaengine_terminate_sync(drv_data->master->dma_tx); + dmaengine_terminate_sync(drv_data->controller->dma_rx); + dmaengine_terminate_sync(drv_data->controller->dma_tx); } int pxa2xx_spi_dma_setup(struct driver_data *drv_data) { - struct pxa2xx_spi_master *pdata = drv_data->master_info; + struct pxa2xx_spi_controller *pdata = drv_data->controller_info; struct device *dev = &drv_data->pdev->dev; - struct spi_controller *master = drv_data->master; + struct spi_controller *controller = drv_data->controller; dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - master->dma_tx = dma_request_slave_channel_compat(mask, + controller->dma_tx = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->tx_param, dev, "tx"); - if (!master->dma_tx) + if (!controller->dma_tx) return -ENODEV; - master->dma_rx = dma_request_slave_channel_compat(mask, + controller->dma_rx = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->rx_param, dev, "rx"); - if (!master->dma_rx) { - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (!controller->dma_rx) { + dma_release_channel(controller->dma_tx); + 
controller->dma_tx = NULL; return -ENODEV; } @@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data) void pxa2xx_spi_dma_release(struct driver_data *drv_data) { - struct spi_controller *master = drv_data->master; + struct spi_controller *controller = drv_data->controller; - if (master->dma_rx) { - dmaengine_terminate_sync(master->dma_rx); - dma_release_channel(master->dma_rx); - master->dma_rx = NULL; + if (controller->dma_rx) { + dmaengine_terminate_sync(controller->dma_rx); + dma_release_channel(controller->dma_rx); + controller->dma_rx = NULL; } - if (master->dma_tx) { - dmaengine_terminate_sync(master->dma_tx); - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (controller->dma_tx) { + dmaengine_terminate_sync(controller->dma_tx); + dma_release_channel(controller->dma_tx); + controller->dma_tx = NULL; } } diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 869f188b02eb..1727fdfbac28 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, struct platform_device_info pi; int ret; struct platform_device *pdev; - struct pxa2xx_spi_master spi_pdata; + struct pxa2xx_spi_controller spi_pdata; struct ssp_device *ssp; struct pxa_spi_info *c; char buf[40]; @@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, static void pxa2xx_spi_pci_remove(struct pci_dev *dev) { struct platform_device *pdev = pci_get_drvdata(dev); - struct pxa2xx_spi_master *spi_pdata; + struct pxa2xx_spi_controller *spi_pdata; spi_pdata = dev_get_platdata(&pdev->dev); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index d84b893a64d7..b6ddba833d02 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data) __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); /* Enable multiblock DMA transfers */ - if (drv_data->master_info->enable_dma) { + if (drv_data->controller_info->enable_dma) { __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1); if (config->reg_general >= 0) { @@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi, __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); ndelay(1000000000 / - (drv_data->master->max_speed_hz / 2)); + (drv_data->controller->max_speed_hz / 2)); } } @@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data) static void reset_sccr1(struct driver_data *drv_data) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); u32 sccr1_reg; sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; @@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) dev_err(&drv_data->pdev->dev, "%s\n", msg); - drv_data->master->cur_msg->status = -EIO; - spi_finalize_current_transfer(drv_data->master); + drv_data->controller->cur_msg->status = -EIO; + spi_finalize_current_transfer(drv_data->controller); } static void int_transfer_complete(struct driver_data *drv_data) @@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); - spi_finalize_current_transfer(drv_data->master); + spi_finalize_current_transfer(drv_data->controller); } static irqreturn_t interrupt_transfer(struct driver_data *drv_data) @@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id) 
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1); pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); - if (!drv_data->master->cur_msg) { + if (!drv_data->controller->cur_msg) { handle_bad_msg(drv_data); /* Never fail */ return IRQ_HANDLED; @@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds) static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) { - unsigned long ssp_clk = drv_data->master->max_speed_hz; + unsigned long ssp_clk = drv_data->controller->max_speed_hz; const struct ssp_device *ssp = drv_data->ssp; rate = min_t(int, ssp_clk, rate); @@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, int rate) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); unsigned int clk_div; switch (drv_data->ssp_type) { @@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, return clk_div << 8; } -static bool pxa2xx_spi_can_dma(struct spi_controller *master, +static bool pxa2xx_spi_can_dma(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *xfer) { @@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master, xfer->len >= chip->dma_burst_size; } -static int pxa2xx_spi_transfer_one(struct spi_controller *master, +static int pxa2xx_spi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { - struct driver_data *drv_data = spi_controller_get_devdata(master); - struct spi_message *message = master->cur_msg; + struct driver_data *drv_data = spi_controller_get_devdata(controller); + struct spi_message *message = controller->cur_msg; struct chip_data *chip = spi_get_ctldata(message->spi); u32 dma_thresh = chip->dma_threshold; u32 dma_burst = chip->dma_burst_size; @@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, "DMA burst size reduced to match bits_per_word\n"); } - dma_mapped = master->can_dma && - master->can_dma(master, message->spi, transfer) && - master->cur_msg_mapped; + dma_mapped = controller->can_dma && + controller->can_dma(controller, message->spi, transfer) && + controller->cur_msg_mapped; if (dma_mapped) { /* Ensure we have the correct interrupt handler */ @@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits); if (!pxa25x_ssp_comp(drv_data)) dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", - master->max_speed_hz + controller->max_speed_hz / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)), dma_mapped ? "DMA" : "PIO"); else dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", - master->max_speed_hz / 2 + controller->max_speed_hz / 2 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)), dma_mapped ? 
"DMA" : "PIO"); @@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, } } - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(controller)) { while (drv_data->write(drv_data)) ; if (drv_data->gpiod_ready) { @@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, return 1; } -static int pxa2xx_spi_slave_abort(struct spi_master *master) +static int pxa2xx_spi_slave_abort(struct spi_controller *controller) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Stop and reset SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); @@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master) dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); - drv_data->master->cur_msg->status = -EINTR; - spi_finalize_current_transfer(drv_data->master); + drv_data->controller->cur_msg->status = -EINTR; + spi_finalize_current_transfer(drv_data->controller); return 0; } -static void pxa2xx_spi_handle_err(struct spi_controller *master, +static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct spi_message *msg) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ pxa2xx_spi_write(drv_data, SSCR0, @@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master, pxa2xx_spi_dma_stop(drv_data); } -static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master) +static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ pxa2xx_spi_write(drv_data, SSCR0, @@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi) break; default: tx_hi_thres = 0; - if (spi_controller_is_slave(drv_data->master)) { + if (spi_controller_is_slave(drv_data->controller)) { tx_thres = 1; rx_thres = 2; } else { @@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi) chip->frm = spi->chip_select; } - chip->enable_dma = drv_data->master_info->enable_dma; + chip->enable_dma = drv_data->controller_info->enable_dma; chip->timeout = TIMOUT_DFLT; } @@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi) if (chip_info->enable_loopback) chip->cr1 = SSCR1_LBM; } - if (spi_controller_is_slave(drv_data->master)) { + if (spi_controller_is_slave(drv_data->controller)) { chip->cr1 |= SSCR1_SCFR; chip->cr1 |= SSCR1_SCLKDIR; chip->cr1 |= SSCR1_SFRMDIR; @@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) #endif /* CONFIG_PCI */ -static struct pxa2xx_spi_master * +static struct pxa2xx_spi_controller * pxa2xx_spi_init_pdata(struct platform_device *pdev) { - struct pxa2xx_spi_master *pdata; + struct pxa2xx_spi_controller *pdata; struct acpi_device *adev; struct ssp_device *ssp; struct resource *res; @@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) return pdata; } -static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, +static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller, unsigned int cs) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); if (has_acpi_companion(&drv_data->pdev->dev)) { switch (drv_data->ssp_type) { @@ 
-1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, static int pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct pxa2xx_spi_master *platform_info; - struct spi_controller *master; + struct pxa2xx_spi_controller *platform_info; + struct spi_controller *controller; struct driver_data *drv_data; struct ssp_device *ssp; const struct lpss_config *config; @@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } if (platform_info->is_slave) - master = spi_alloc_slave(dev, sizeof(struct driver_data)); + controller = spi_alloc_slave(dev, sizeof(struct driver_data)); else - master = spi_alloc_master(dev, sizeof(struct driver_data)); + controller = spi_alloc_master(dev, sizeof(struct driver_data)); - if (!master) { - dev_err(&pdev->dev, "cannot alloc spi_master\n"); + if (!controller) { + dev_err(&pdev->dev, "cannot alloc spi_controller\n"); pxa_ssp_free(ssp); return -ENOMEM; } - drv_data = spi_controller_get_devdata(master); - drv_data->master = master; - drv_data->master_info = platform_info; + drv_data = spi_controller_get_devdata(controller); + drv_data->controller = controller; + drv_data->controller_info = platform_info; drv_data->pdev = pdev; drv_data->ssp = ssp; - master->dev.of_node = pdev->dev.of_node; + controller->dev.of_node = pdev->dev.of_node; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; - - master->bus_num = ssp->port_id; - master->dma_alignment = DMA_ALIGNMENT; - master->cleanup = cleanup; - master->setup = setup; - master->set_cs = pxa2xx_spi_set_cs; - master->transfer_one = pxa2xx_spi_transfer_one; - master->slave_abort = pxa2xx_spi_slave_abort; - master->handle_err = pxa2xx_spi_handle_err; - master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; - master->fw_translate_cs = pxa2xx_spi_fw_translate_cs; - master->auto_runtime_pm = true; - master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + + controller->bus_num = ssp->port_id; + controller->dma_alignment = DMA_ALIGNMENT; + controller->cleanup = cleanup; + controller->setup = setup; + controller->set_cs = pxa2xx_spi_set_cs; + controller->transfer_one = pxa2xx_spi_transfer_one; + controller->slave_abort = pxa2xx_spi_slave_abort; + controller->handle_err = pxa2xx_spi_handle_err; + controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; + controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs; + controller->auto_runtime_pm = true; + controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; drv_data->ssp_type = ssp->type; @@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) if (pxa25x_ssp_comp(drv_data)) { switch (drv_data->ssp_type) { case QUARK_X1000_SSP: - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); break; default: - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); break; } @@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->clear_sr = SSSR_ROR; drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; } else { - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; drv_data->dma_cr1 = DEFAULT_DMA_CR1; 
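For reference, the bits_per_word_mask values assigned in the probe above encode the supported word lengths as a bitmap: bit n-1 set means n-bit words are accepted, which is how the SPI_BPW_MASK()/SPI_BPW_RANGE_MASK() helpers in <linux/spi/spi.h> are defined. A standalone sketch of that encoding (the helper below is a local re-implementation for illustration, not the kernel macro):

    #include <stdio.h>
    #include <stdint.h>

    /* Equivalent of SPI_BPW_RANGE_MASK(min, max): GENMASK(max - 1, min - 1) */
    static uint32_t bpw_range_mask(unsigned int min, unsigned int max)
    {
        uint32_t high = (max >= 32) ? 0xffffffffu : ((1u << max) - 1);
        uint32_t low = (1u << (min - 1)) - 1;

        return high & ~low;
    }

    int main(void)
    {
        printf("4..16 bit words: 0x%08x\n", bpw_range_mask(4, 16)); /* 0x0000fff8 */
        printf("4..32 bit words: 0x%08x\n", bpw_range_mask(4, 32)); /* 0xfffffff8 */
        return 0;
    }
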
drv_data->clear_sr = SSSR_ROR | SSSR_TINT; @@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data); if (status < 0) { dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); - goto out_error_master_alloc; + goto out_error_controller_alloc; } /* Setup DMA if requested */ @@ -1695,7 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) dev_dbg(dev, "no DMA channels available, using PIO\n"); platform_info->enable_dma = false; } else { - master->can_dma = pxa2xx_spi_can_dma; + controller->can_dma = pxa2xx_spi_can_dma; + controller->max_dma_len = MAX_DMA_LEN; } } @@ -1704,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) if (status) goto out_error_dma_irq_alloc; - master->max_speed_hz = clk_get_rate(ssp->clk); + controller->max_speed_hz = clk_get_rate(ssp->clk); /* Load default SSP configuration */ pxa2xx_spi_write(drv_data, SSCR0, 0); @@ -1727,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) break; default: - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(controller)) { tmp = SSCR1_SCFR | SSCR1_SCLKDIR | SSCR1_SFRMDIR | @@ -1740,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } pxa2xx_spi_write(drv_data, SSCR1, tmp); tmp = SSCR0_Motorola | SSCR0_DataSize(8); - if (!spi_controller_is_slave(master)) + if (!spi_controller_is_slave(controller)) tmp |= SSCR0_SCR(2); pxa2xx_spi_write(drv_data, SSCR0, tmp); break; @@ -1765,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info->num_chipselect = config->cs_num; } } - master->num_chipselect = platform_info->num_chipselect; + controller->num_chipselect = platform_info->num_chipselect; count = gpiod_count(&pdev->dev, "cs"); if (count > 0) { int i; - master->num_chipselect = max_t(int, count, - master->num_chipselect); + controller->num_chipselect = max_t(int, count, + controller->num_chipselect); drv_data->cs_gpiods = devm_kcalloc(&pdev->dev, - master->num_chipselect, sizeof(struct gpio_desc *), + controller->num_chipselect, sizeof(struct gpio_desc *), GFP_KERNEL); if (!drv_data->cs_gpiods) { status = -ENOMEM; goto out_error_clock_enabled; } - for (i = 0; i < master->num_chipselect; i++) { + for (i = 0; i < controller->num_chipselect; i++) { struct gpio_desc *gpiod; gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); @@ -1815,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); - status = devm_spi_register_controller(&pdev->dev, master); + status = devm_spi_register_controller(&pdev->dev, controller); if (status != 0) { - dev_err(&pdev->dev, "problem registering spi master\n"); + dev_err(&pdev->dev, "problem registering spi controller\n"); goto out_error_clock_enabled; } @@ -1832,8 +1833,8 @@ out_error_dma_irq_alloc: pxa2xx_spi_dma_release(drv_data); free_irq(ssp->irq, drv_data); -out_error_master_alloc: - spi_controller_put(master); +out_error_controller_alloc: + spi_controller_put(controller); pxa_ssp_free(ssp); return status; } @@ -1854,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) clk_disable_unprepare(ssp->clk); /* Release DMA */ - if (drv_data->master_info->enable_dma) + if (drv_data->controller_info->enable_dma) pxa2xx_spi_dma_release(drv_data); pm_runtime_put_noidle(&pdev->dev); @@ -1876,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev) struct ssp_device *ssp = drv_data->ssp; int status; - status = spi_controller_suspend(drv_data->master); + status 
= spi_controller_suspend(drv_data->controller); if (status != 0) return status; pxa2xx_spi_write(drv_data, SSCR0, 0); @@ -1901,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev) } /* Start the queue running */ - return spi_controller_resume(drv_data->master); + return spi_controller_resume(drv_data->controller); } #endif diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 4e324da66ef7..aba777b4502d 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -31,10 +31,10 @@ struct driver_data { /* SPI framework hookup */ enum pxa_ssp_type ssp_type; - struct spi_controller *master; + struct spi_controller *controller; /* PXA hookup */ - struct pxa2xx_spi_master *master_info; + struct pxa2xx_spi_controller *controller_info; /* SSP register addresses */ void __iomem *ioaddr; diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index a4ef641b5227..556870dcdf79 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -180,7 +180,7 @@ struct rspi_data { void __iomem *addr; u32 max_speed_hz; - struct spi_master *master; + struct spi_controller *ctlr; wait_queue_head_t wait; struct clk *clk; u16 spcmd; @@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi) /* optional functions */ struct spi_ops { int (*set_config_register)(struct rspi_data *rspi, int access_size); - int (*transfer_one)(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer); + int (*transfer_one)(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer); u16 mode_bits; u16 flags; u16 fifo_size; @@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data) { int error = rspi_wait_for_tx_empty(rspi); if (error < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return error; } rspi_write_data(rspi, data); @@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi) error = rspi_wait_for_rx_full(rspi); if (error < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return error; } data = rspi_read_data(rspi); @@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, - rx->sgl, rx->nents, DMA_DEV_TO_MEM, + desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl, + rx->nents, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { ret = -EAGAIN; @@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, } if (tx) { - desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, - tx->sgl, tx->nents, DMA_MEM_TO_DEV, + desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl, + tx->nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { ret = -EAGAIN; @@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, /* Now start DMA */ if (rx) - dma_async_issue_pending(rspi->master->dma_rx); + dma_async_issue_pending(rspi->ctlr->dma_rx); if (tx) - dma_async_issue_pending(rspi->master->dma_tx); + dma_async_issue_pending(rspi->ctlr->dma_tx); ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); @@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, ret = 0; } else { if (!ret) { - dev_err(&rspi->master->dev, "DMA timeout\n"); + 
dev_err(&rspi->ctlr->dev, "DMA timeout\n"); ret = -ETIMEDOUT; } if (tx) - dmaengine_terminate_all(rspi->master->dma_tx); + dmaengine_terminate_all(rspi->ctlr->dma_tx); if (rx) - dmaengine_terminate_all(rspi->master->dma_rx); + dmaengine_terminate_all(rspi->ctlr->dma_rx); } rspi_disable_irq(rspi, irq_mask); @@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, no_dma_tx: if (rx) - dmaengine_terminate_all(rspi->master->dma_rx); + dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->master->dev), - dev_name(&rspi->master->dev)); + dev_driver_string(&rspi->ctlr->dev), + dev_name(&rspi->ctlr->dev)); } return ret; } @@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi, return xfer->len > rspi->ops->fifo_size; } -static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, +static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); return __rspi_can_dma(rspi, xfer); } @@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, static int rspi_dma_check_then_transfer(struct rspi_data *rspi, struct spi_transfer *xfer) { - if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer)) + if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer)) return -EAGAIN; /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ @@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi, return 0; } -static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer) +static int rspi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); u8 spcr; spcr = rspi_read8(rspi, RSPI_SPCR); @@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, return rspi_common_transfer(rspi, xfer); } -static int rspi_rz_transfer_one(struct spi_master *master, +static int rspi_rz_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); rspi_rz_receive_init(rspi); @@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx, if (n == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_tx_empty(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return ret; } for (i = 0; i < n; i++) @@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx, ret = rspi_wait_for_rx_full(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return ret; } for (i = 0; i < n; i++) @@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) unsigned int i, len; int ret; - if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL); if (ret != -EAGAIN) return ret; @@ -796,7 +796,7 @@ static int 
qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) if (len == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_tx_empty(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return ret; } for (i = 0; i < len; i++) @@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) unsigned int i, len; int ret; - if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg); if (ret != -EAGAIN) return ret; @@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) if (len == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_rx_full(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return ret; } for (i = 0; i < len; i++) @@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) return 0; } -static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer) +static int qspi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); if (spi->mode & SPI_LOOP) { return qspi_transfer_out_in(rspi, xfer); @@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, static int rspi_setup(struct spi_device *spi) { - struct rspi_data *rspi = spi_master_get_devdata(spi->master); + struct rspi_data *rspi = spi_controller_get_devdata(spi->controller); rspi->max_speed_hz = spi->max_speed_hz; @@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi, return 0; } -static int rspi_prepare_message(struct spi_master *master, +static int rspi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); int ret; if (msg->spi->mode & @@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master, return 0; } -static int rspi_unprepare_message(struct spi_master *master, +static int rspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); /* Disable SPI function */ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR); @@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, return chan; } -static int rspi_request_dma(struct device *dev, struct spi_master *master, +static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, const struct resource *res) { const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); @@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master, return 0; } - master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, - res->start + RSPI_SPDR); - if (!master->dma_tx) + ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, + res->start + RSPI_SPDR); + if (!ctlr->dma_tx) return -ENODEV; - master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, - res->start + RSPI_SPDR); - if (!master->dma_rx) { - dma_release_channel(master->dma_tx); - master->dma_tx 
= NULL; + ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, + res->start + RSPI_SPDR); + if (!ctlr->dma_rx) { + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; return -ENODEV; } - master->can_dma = rspi_can_dma; + ctlr->can_dma = rspi_can_dma; dev_info(dev, "DMA available"); return 0; } -static void rspi_release_dma(struct spi_master *master) +static void rspi_release_dma(struct spi_controller *ctlr) { - if (master->dma_tx) - dma_release_channel(master->dma_tx); - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (ctlr->dma_tx) + dma_release_channel(ctlr->dma_tx); + if (ctlr->dma_rx) + dma_release_channel(ctlr->dma_rx); } static int rspi_remove(struct platform_device *pdev) { struct rspi_data *rspi = platform_get_drvdata(pdev); - rspi_release_dma(rspi->master); + rspi_release_dma(rspi->ctlr); pm_runtime_disable(&pdev->dev); return 0; @@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = { .set_config_register = rspi_set_config_register, .transfer_one = rspi_transfer_one, .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, - .flags = SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, }; @@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = { .set_config_register = rspi_rz_set_config_register, .transfer_one = rspi_rz_transfer_one, .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, - .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ }; @@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD, - .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, }; @@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = { MODULE_DEVICE_TABLE(of, rspi_of_match); -static int rspi_parse_dt(struct device *dev, struct spi_master *master) +static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) { u32 num_cs; int error; @@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master) return error; } - master->num_chipselect = num_cs; + ctlr->num_chipselect = num_cs; return 0; } #else #define rspi_of_match NULL -static inline int rspi_parse_dt(struct device *dev, struct spi_master *master) +static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) { return -EINVAL; } @@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq, static int rspi_probe(struct platform_device *pdev) { struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct rspi_data *rspi; int ret; const struct rspi_plat_data *rspi_pd; const struct spi_ops *ops; - master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); - if (master == NULL) + ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); + if (ctlr == NULL) return -ENOMEM; ops = of_device_get_match_data(&pdev->dev); if (ops) { - ret = rspi_parse_dt(&pdev->dev, master); + ret = rspi_parse_dt(&pdev->dev, ctlr); if (ret) goto error1; } else { ops = (struct spi_ops *)pdev->id_entry->driver_data; rspi_pd = dev_get_platdata(&pdev->dev); if (rspi_pd && rspi_pd->num_chipselect) - master->num_chipselect = rspi_pd->num_chipselect; + ctlr->num_chipselect = rspi_pd->num_chipselect; else - master->num_chipselect = 2; /* default */ + ctlr->num_chipselect = 2; /* default */ } /* ops parameter check */ 
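The QSPI transfer helpers above try DMA first and fall back to byte-by-byte PIO whenever the DMA path reports -EAGAIN (no channel available, or the transfer does not qualify for DMA). A minimal standalone sketch of that control flow; try_dma(), do_pio() and the 32-byte threshold are hypothetical stand-ins, only the -EAGAIN convention mirrors the driver:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stub: pretend DMA only handles transfers larger than a FIFO */
    static int try_dma(unsigned int len)
    {
        if (len <= 32)
            return -EAGAIN;
        printf("%u bytes moved by DMA\n", len);
        return 0;
    }

    static void do_pio(unsigned int len)
    {
        printf("%u bytes moved by PIO\n", len);
    }

    static int transfer(unsigned int len)
    {
        int ret = try_dma(len);

        if (ret != -EAGAIN)   /* success or a hard error: we are done */
            return ret;

        do_pio(len);          /* -EAGAIN means "not handled", fall back */
        return 0;
    }

    int main(void)
    {
        transfer(16);
        transfer(256);
        return 0;
    }
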
@@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev) goto error1; } - rspi = spi_master_get_devdata(master); + rspi = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, rspi); rspi->ops = ops; - rspi->master = master; + rspi->ctlr = ctlr; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rspi->addr = devm_ioremap_resource(&pdev->dev, res); @@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev) init_waitqueue_head(&rspi->wait); - master->bus_num = pdev->id; - master->setup = rspi_setup; - master->auto_runtime_pm = true; - master->transfer_one = ops->transfer_one; - master->prepare_message = rspi_prepare_message; - master->unprepare_message = rspi_unprepare_message; - master->mode_bits = ops->mode_bits; - master->flags = ops->flags; - master->dev.of_node = pdev->dev.of_node; + ctlr->bus_num = pdev->id; + ctlr->setup = rspi_setup; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one = ops->transfer_one; + ctlr->prepare_message = rspi_prepare_message; + ctlr->unprepare_message = rspi_unprepare_message; + ctlr->mode_bits = ops->mode_bits; + ctlr->flags = ops->flags; + ctlr->dev.of_node = pdev->dev.of_node; ret = platform_get_irq_byname(pdev, "rx"); if (ret < 0) { @@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev) goto error2; } - ret = rspi_request_dma(&pdev->dev, master, res); + ret = rspi_request_dma(&pdev->dev, ctlr, res); if (ret < 0) dev_warn(&pdev->dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto error3; } @@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev) return 0; error3: - rspi_release_dma(master); + rspi_release_dma(ctlr); error2: pm_runtime_disable(&pdev->dev); error1: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } @@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); - return spi_master_suspend(rspi->master); + return spi_controller_suspend(rspi->ctlr); } static int rspi_resume(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); - return spi_master_resume(rspi->master); + return spi_controller_resume(rspi->ctlr); } static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index dc0926e43665..7f73f91d412a 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c @@ -35,7 +35,7 @@ struct hspi_priv { void __iomem *addr; - struct spi_master *master; + struct spi_controller *ctlr; struct device *dev; struct clk *clk; }; @@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi, hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */ } -static int hspi_transfer_one_message(struct spi_master *master, +static int hspi_transfer_one_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct hspi_priv *hspi = spi_master_get_devdata(master); + struct hspi_priv *hspi = spi_controller_get_devdata(ctlr); struct spi_transfer *t; u32 tx; u32 rx; @@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master, ndelay(nsecs); hspi_hw_cs_disable(hspi); } - spi_finalize_current_message(master); + spi_finalize_current_message(ctlr); return ret; } @@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master 
*master, static int hspi_probe(struct platform_device *pdev) { struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct hspi_priv *hspi; struct clk *clk; int ret; @@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev) return -EINVAL; } - master = spi_alloc_master(&pdev->dev, sizeof(*hspi)); - if (!master) { - dev_err(&pdev->dev, "spi_alloc_master error.\n"); + ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi)); + if (!ctlr) return -ENOMEM; - } clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { @@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev) goto error0; } - hspi = spi_master_get_devdata(master); + hspi = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, hspi); /* init hspi */ - hspi->master = master; + hspi->ctlr = ctlr; hspi->dev = &pdev->dev; hspi->clk = clk; hspi->addr = devm_ioremap(hspi->dev, res->start, resource_size(res)); if (!hspi->addr) { - dev_err(&pdev->dev, "ioremap error.\n"); ret = -ENOMEM; goto error1; } pm_runtime_enable(&pdev->dev); - master->bus_num = pdev->id; - master->mode_bits = SPI_CPOL | SPI_CPHA; - master->dev.of_node = pdev->dev.of_node; - master->auto_runtime_pm = true; - master->transfer_one_message = hspi_transfer_one_message; - master->bits_per_word_mask = SPI_BPW_MASK(8); + ctlr->bus_num = pdev->id; + ctlr->mode_bits = SPI_CPOL | SPI_CPHA; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one_message = hspi_transfer_one_message; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto error2; } @@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev) error1: clk_put(clk); error0: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index d14b407cc800..e2eb466db10a 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * SuperH MSIOF SPI Master Interface + * SuperH MSIOF SPI Controller Interface * * Copyright (c) 2009 Magnus Damm * Copyright (C) 2014 Renesas Electronics Corporation @@ -32,14 +32,15 @@ #include <asm/unaligned.h> struct sh_msiof_chipdata { + u32 bits_per_word_mask; u16 tx_fifo_size; u16 rx_fifo_size; - u16 master_flags; + u16 ctlr_flags; u16 min_div_pow; }; struct sh_msiof_spi_priv { - struct spi_master *master; + struct spi_controller *ctlr; void __iomem *mapbase; struct clk *clk; struct platform_device *pdev; @@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); sh_msiof_write(p, TSCR, scr); - if (!(p->master->flags & SPI_MASTER_MUST_TX)) + if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, RSCR, scr); } @@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, tmp |= !cs_high << MDR1_SYNCAC_SHIFT; tmp |= lsb_first << MDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); - if (spi_controller_is_slave(p->master)) { + if (spi_controller_is_slave(p->ctlr)) { sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); } else { sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON | (ss < MAX_SS ? 
ss : 0) << TMDR1_SYNCCH_SHIFT); } - if (p->master->flags & SPI_MASTER_MUST_TX) { + if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } @@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, { u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); - if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX)) + if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, TMDR2, dr2); else sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); @@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, static int sh_msiof_spi_setup(struct spi_device *spi) { - struct device_node *np = spi->master->dev.of_node; - struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + struct device_node *np = spi->controller->dev.of_node; + struct sh_msiof_spi_priv *p = + spi_controller_get_devdata(spi->controller); u32 clr, set, tmp; if (!np) { @@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; } - if (spi_controller_is_slave(p->master)) + if (spi_controller_is_slave(p->ctlr)) return 0; if (p->native_cs_inited && @@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; } -static int sh_msiof_prepare_message(struct spi_master *master, +static int sh_msiof_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); const struct spi_device *spi = msg->spi; u32 ss, cs_high; @@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master, static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) { - bool slave = spi_controller_is_slave(p->master); + bool slave = spi_controller_is_slave(p->ctlr); int ret = 0; /* setup clock and rx/tx signals */ @@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) { - bool slave = spi_controller_is_slave(p->master); + bool slave = spi_controller_is_slave(p->ctlr); int ret = 0; /* shut down frame, rx/tx and clock signals */ @@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) return ret; } -static int sh_msiof_slave_abort(struct spi_master *master) +static int sh_msiof_slave_abort(struct spi_controller *ctlr) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); p->slave_aborted = true; complete(&p->done); @@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master) static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p, struct completion *x) { - if (spi_controller_is_slave(p->master)) { + if (spi_controller_is_slave(p->ctlr)) { if (wait_for_completion_interruptible(x) || p->slave_aborted) { dev_dbg(&p->pdev->dev, "interrupted\n"); @@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { ier_bits |= IER_RDREQE | IER_RDMAE; - desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, + desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) @@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (tx) { ier_bits |= IER_TDREQE | 
IER_TDMAE; - dma_sync_single_for_device(p->master->dma_tx->device->dev, + dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); - desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, + desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, p->tx_dma_addr, len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { @@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* Now start DMA */ if (rx) - dma_async_issue_pending(p->master->dma_rx); + dma_async_issue_pending(p->ctlr->dma_rx); if (tx) - dma_async_issue_pending(p->master->dma_tx); + dma_async_issue_pending(p->ctlr->dma_tx); ret = sh_msiof_spi_start(p, rx); if (ret) { @@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (rx) - dma_sync_single_for_cpu(p->master->dma_rx->device->dev, - p->rx_dma_addr, len, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev, + p->rx_dma_addr, len, DMA_FROM_DEVICE); return 0; @@ -856,10 +857,10 @@ stop_reset: sh_msiof_spi_stop(p, rx); stop_dma: if (tx) - dmaengine_terminate_all(p->master->dma_tx); + dmaengine_terminate_all(p->ctlr->dma_tx); no_dma_tx: if (rx) - dmaengine_terminate_all(p->master->dma_rx); + dmaengine_terminate_all(p->ctlr->dma_rx); sh_msiof_write(p, IER, 0); return ret; } @@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words) memcpy(dst, src, words * 4); } -static int sh_msiof_transfer_one(struct spi_master *master, +static int sh_msiof_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *t) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); void (*copy32)(u32 *, const u32 *, unsigned int); void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); @@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master, int ret; /* setup clocks (clock already enabled in chipselect()) */ - if (!spi_controller_is_slave(p->master)) + if (!spi_controller_is_slave(p->ctlr)) sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz); - while (master->dma_tx && len > 15) { + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. 
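Because the MSIOF DMA path moves whole 32-bit FIFO words only, 8-bit and 16-bit transfers are packed into 32-bit words with byte or 16-bit swapping, and each DMA chunk is capped at the FIFO size and kept a multiple of 4 bytes so that any unaligned tail is finished by the PIO path. A standalone sketch of one iteration's chunk arithmetic (the FIFO depth and transfer length are example values, and round_down() is re-implemented locally for illustration):

    #include <stdio.h>

    #define ROUND_DOWN(x, a)  ((x) - ((x) % (a)))

    int main(void)
    {
        unsigned int fifo_bytes = 64 * 4; /* example: 64-word FIFO */
        unsigned int len = 75;            /* example transfer length in bytes */
        unsigned int chunk = ROUND_DOWN(len, 4);

        if (chunk > fifo_bytes)
            chunk = fifo_bytes;

        printf("DMA chunk: %u bytes, PIO tail: %u bytes\n",
               chunk, len - chunk);       /* prints 72 and 3 */
        return 0;
    }
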
@@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master, unsigned int l = 0; if (tx_buf) - l = min(len, p->tx_fifo_size * 4); + l = min(round_down(len, 4), p->tx_fifo_size * 4); if (rx_buf) - l = min(len, p->rx_fifo_size * 4); + l = min(round_down(len, 4), p->rx_fifo_size * 4); if (bits <= 8) { - if (l & 3) - break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 3) - break; copy32 = copy_wswap32; } else { copy32 = copy_plain32; @@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master, } static const struct sh_msiof_chipdata sh_data = { + .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = 0, + .ctlr_flags = 0, .min_div_pow = 0, }; static const struct sh_msiof_chipdata rcar_gen2_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = SPI_MASTER_MUST_TX, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 0, }; static const struct sh_msiof_chipdata rcar_gen3_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = SPI_MASTER_MUST_TX, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, }; @@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) if (ret <= 0) return 0; - num_cs = max_t(unsigned int, ret, p->master->num_chipselect); + num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); for (i = 0; i < num_cs; i++) { struct gpio_desc *gpiod; @@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) { struct platform_device *pdev = p->pdev; struct device *dev = &pdev->dev; - const struct sh_msiof_spi_info *info = dev_get_platdata(dev); + const struct sh_msiof_spi_info *info = p->info; unsigned int dma_tx_id, dma_rx_id; const struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct device *tx_dev, *rx_dev; if (dev->of_node) { @@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) if (!res) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master = p->master; - master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, - res->start + TFDR); - if (!master->dma_tx) + ctlr = p->ctlr; + ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, + dma_tx_id, res->start + TFDR); + if (!ctlr->dma_tx) return -ENODEV; - master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, - res->start + RFDR); - if (!master->dma_rx) + ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, + dma_rx_id, res->start + RFDR); + if (!ctlr->dma_rx) goto free_tx_chan; p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); @@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) if (!p->rx_dma_page) goto free_tx_page; - tx_dev = master->dma_tx->device->dev; + tx_dev = ctlr->dma_tx->device->dev; p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(tx_dev, p->tx_dma_addr)) goto free_rx_page; - rx_dev = master->dma_rx->device->dev; + rx_dev = ctlr->dma_rx->device->dev; p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(rx_dev, p->rx_dma_addr)) @@ -1272,34 +1272,34 @@ free_rx_page: free_tx_page: free_page((unsigned long)p->tx_dma_page); free_rx_chan: - 
dma_release_channel(master->dma_rx); + dma_release_channel(ctlr->dma_rx); free_tx_chan: - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; return -ENODEV; } static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p) { - struct spi_master *master = p->master; + struct spi_controller *ctlr = p->ctlr; - if (!master->dma_tx) + if (!ctlr->dma_tx) return; - dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr, - PAGE_SIZE, DMA_FROM_DEVICE); - dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr, - PAGE_SIZE, DMA_TO_DEVICE); + dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE, + DMA_TO_DEVICE); free_page((unsigned long)p->rx_dma_page); free_page((unsigned long)p->tx_dma_page); - dma_release_channel(master->dma_rx); - dma_release_channel(master->dma_tx); + dma_release_channel(ctlr->dma_rx); + dma_release_channel(ctlr->dma_tx); } static int sh_msiof_spi_probe(struct platform_device *pdev) { struct resource *r; - struct spi_master *master; + struct spi_controller *ctlr; const struct sh_msiof_chipdata *chipdata; struct sh_msiof_spi_info *info; struct sh_msiof_spi_priv *p; @@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) } if (info->mode == MSIOF_SPI_SLAVE) - master = spi_alloc_slave(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); + ctlr = spi_alloc_slave(&pdev->dev, + sizeof(struct sh_msiof_spi_priv)); else - master = spi_alloc_master(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); - if (master == NULL) + ctlr = spi_alloc_master(&pdev->dev, + sizeof(struct sh_msiof_spi_priv)); + if (ctlr == NULL) return -ENOMEM; - p = spi_master_get_devdata(master); + p = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, p); - p->master = master; + p->ctlr = ctlr; p->info = info; p->min_div_pow = chipdata->min_div_pow; @@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) p->rx_fifo_size = p->info->rx_fifo_override; /* Setup GPIO chip selects */ - master->num_chipselect = p->info->num_chipselect; + ctlr->num_chipselect = p->info->num_chipselect; ret = sh_msiof_get_cs_gpios(p); if (ret) goto err1; - /* init master code */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; - master->flags = chipdata->master_flags; - master->bus_num = pdev->id; - master->dev.of_node = pdev->dev.of_node; - master->setup = sh_msiof_spi_setup; - master->prepare_message = sh_msiof_prepare_message; - master->slave_abort = sh_msiof_slave_abort; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - master->auto_runtime_pm = true; - master->transfer_one = sh_msiof_transfer_one; + /* init controller code */ + ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; + ctlr->flags = chipdata->ctlr_flags; + ctlr->bus_num = pdev->id; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->setup = sh_msiof_spi_setup; + ctlr->prepare_message = sh_msiof_prepare_message; + ctlr->slave_abort = sh_msiof_slave_abort; + ctlr->bits_per_word_mask = chipdata->bits_per_word_mask; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one = sh_msiof_transfer_one; ret = sh_msiof_request_dma(p); if (ret < 0) dev_warn(&pdev->dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - 
dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto err2; } @@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) sh_msiof_release_dma(p); pm_runtime_disable(&pdev->dev); err1: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } @@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); - return spi_master_suspend(p->master); + return spi_controller_suspend(p->ctlr); } static int sh_msiof_spi_resume(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); - return spi_master_resume(p->master); + return spi_controller_resume(p->ctlr); } static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, @@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = { }; module_platform_driver(sh_msiof_spi_drv); -MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); +MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c new file mode 100644 index 000000000000..93ec2c6cdbfd --- /dev/null +++ b/drivers/spi/spi-sifive.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright 2018 SiFive, Inc. +// +// SiFive SPI controller driver (master mode only) +// +// Author: SiFive, Inc. +// sifive@sifive.com + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/io.h> +#include <linux/log2.h> + +#define SIFIVE_SPI_DRIVER_NAME "sifive_spi" + +#define SIFIVE_SPI_MAX_CS 32 +#define SIFIVE_SPI_DEFAULT_DEPTH 8 +#define SIFIVE_SPI_DEFAULT_MAX_BITS 8 + +/* register offsets */ +#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */ +#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */ +#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */ +#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */ +#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */ +#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */ +#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */ +#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */ +#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */ +#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */ +#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */ +#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */ +#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */ +#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */ +#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */ +#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */ + +/* sckdiv bits */ +#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU + +/* sckmode bits */ +#define SIFIVE_SPI_SCKMODE_PHA BIT(0) +#define SIFIVE_SPI_SCKMODE_POL BIT(1) +#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \ + SIFIVE_SPI_SCKMODE_POL) + +/* csmode bits */ +#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U +#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U +#define SIFIVE_SPI_CSMODE_MODE_OFF 3U + +/* delay0 bits */ +#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU +#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16) + +/* delay1 bits */ +#define 
SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU +#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16) + +/* fmt bits */ +#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U +#define SIFIVE_SPI_FMT_PROTO_DUAL 1U +#define SIFIVE_SPI_FMT_PROTO_QUAD 2U +#define SIFIVE_SPI_FMT_PROTO_MASK 3U +#define SIFIVE_SPI_FMT_ENDIAN BIT(2) +#define SIFIVE_SPI_FMT_DIR BIT(3) +#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16) +#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16) + +/* txdata bits */ +#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_TXDATA_FULL BIT(31) + +/* rxdata bits */ +#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_RXDATA_EMPTY BIT(31) + +/* ie and ip bits */ +#define SIFIVE_SPI_IP_TXWM BIT(0) +#define SIFIVE_SPI_IP_RXWM BIT(1) + +struct sifive_spi { + void __iomem *regs; /* virt. address of control registers */ + struct clk *clk; /* bus clock */ + unsigned int fifo_depth; /* fifo depth in words */ + u32 cs_inactive; /* level of the CS pins when inactive */ + struct completion done; /* wake-up from interrupt */ +}; + +static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value) +{ + iowrite32(value, spi->regs + offset); +} + +static u32 sifive_spi_read(struct sifive_spi *spi, int offset) +{ + return ioread32(spi->regs + offset); +} + +static void sifive_spi_init(struct sifive_spi *spi) +{ + /* Watermark interrupts are disabled by default */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + /* Default watermark FIFO threshold values */ + sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1); + sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0); + + /* Set CS/SCK Delays and Inactive Time to defaults */ + sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0, + SIFIVE_SPI_DELAY0_CSSCK(1) | + SIFIVE_SPI_DELAY0_SCKCS(1)); + sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1, + SIFIVE_SPI_DELAY1_INTERCS(1) | + SIFIVE_SPI_DELAY1_INTERXFR(0)); + + /* Exit specialized memory-mapped SPI flash mode */ + sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0); +} + +static int +sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg) +{ + struct sifive_spi *spi = spi_master_get_devdata(master); + struct spi_device *device = msg->spi; + + /* Update the chip select polarity */ + if (device->mode & SPI_CS_HIGH) + spi->cs_inactive &= ~BIT(device->chip_select); + else + spi->cs_inactive |= BIT(device->chip_select); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive); + + /* Select the correct device */ + sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select); + + /* Set clock mode */ + sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE, + device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK); + + return 0; +} + +static void sifive_spi_set_cs(struct spi_device *device, bool is_high) +{ + struct sifive_spi *spi = spi_master_get_devdata(device->master); + + /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */ + if (device->mode & SPI_CS_HIGH) + is_high = !is_high; + + sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ? 
+ SIFIVE_SPI_CSMODE_MODE_AUTO : + SIFIVE_SPI_CSMODE_MODE_HOLD); +} + +static int +sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device, + struct spi_transfer *t) +{ + u32 cr; + unsigned int mode; + + /* Calculate and program the clock rate */ + cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1; + cr &= SIFIVE_SPI_SCKDIV_DIV_MASK; + sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr); + + mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits); + + /* Set frame format */ + cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word); + switch (mode) { + case SPI_NBITS_QUAD: + cr |= SIFIVE_SPI_FMT_PROTO_QUAD; + break; + case SPI_NBITS_DUAL: + cr |= SIFIVE_SPI_FMT_PROTO_DUAL; + break; + default: + cr |= SIFIVE_SPI_FMT_PROTO_SINGLE; + break; + } + if (device->mode & SPI_LSB_FIRST) + cr |= SIFIVE_SPI_FMT_ENDIAN; + if (!t->rx_buf) + cr |= SIFIVE_SPI_FMT_DIR; + sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr); + + /* We will want to poll if the time we need to wait is + * less than the context switching time. + * Let's call that threshold 5us. The operation will take: + * (8/mode) * fifo_depth / hz <= 5 * 10^-6 + * 1600000 * fifo_depth <= hz * mode + */ + return 1600000 * spi->fifo_depth <= t->speed_hz * mode; +} + +static irqreturn_t sifive_spi_irq(int irq, void *dev_id) +{ + struct sifive_spi *spi = dev_id; + u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP); + + if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) { + /* Disable interrupts until next transfer */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + complete(&spi->done); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll) +{ + if (poll) { + u32 cr; + + do { + cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP); + } while (!(cr & bit)); + } else { + reinit_completion(&spi->done); + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit); + wait_for_completion(&spi->done); + } +} + +static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr) +{ + WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA) + & SIFIVE_SPI_TXDATA_FULL) != 0); + sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA, + *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK); +} + +static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr) +{ + u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA); + + WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0); + *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK; +} + +static int +sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device, + struct spi_transfer *t) +{ + struct sifive_spi *spi = spi_master_get_devdata(master); + int poll = sifive_spi_prep_transfer(spi, device, t); + const u8 *tx_ptr = t->tx_buf; + u8 *rx_ptr = t->rx_buf; + unsigned int remaining_words = t->len; + + while (remaining_words) { + unsigned int n_words = min(remaining_words, spi->fifo_depth); + unsigned int i; + + /* Enqueue n_words for transmission */ + for (i = 0; i < n_words; i++) + sifive_spi_tx(spi, tx_ptr++); + + if (rx_ptr) { + /* Wait for transmission + reception to complete */ + sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, + n_words - 1); + sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll); + + /* Read out all the data from the RX FIFO */ + for (i = 0; i < n_words; i++) + sifive_spi_rx(spi, rx_ptr++); + } else { + /* Wait for transmission to complete */ + sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll); + } + + remaining_words -= n_words; + } + + return 0; +} + +static int sifive_spi_probe(struct platform_device *pdev) +{ + struct sifive_spi *spi; + struct resource *res; + int 
ret, irq, num_cs; + u32 cs_bits, max_bits_per_word; + struct spi_master *master; + + master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi)); + if (!master) { + dev_err(&pdev->dev, "out of memory\n"); + return -ENOMEM; + } + + spi = spi_master_get_devdata(master); + init_completion(&spi->done); + platform_set_drvdata(pdev, master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spi->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spi->regs)) { + ret = PTR_ERR(spi->regs); + goto put_master; + } + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) { + dev_err(&pdev->dev, "Unable to find bus clock\n"); + ret = PTR_ERR(spi->clk); + goto put_master; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Unable to find interrupt\n"); + ret = irq; + goto put_master; + } + + /* Optional parameters */ + ret = + of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth", + &spi->fifo_depth); + if (ret < 0) + spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH; + + ret = + of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word", + &max_bits_per_word); + + if (!ret && max_bits_per_word < 8) { + dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n"); + ret = -EINVAL; + goto put_master; + } + + /* Spin up the bus clock before hitting registers */ + ret = clk_prepare_enable(spi->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable bus clock\n"); + goto put_master; + } + + /* probe the number of CS lines */ + spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU); + cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive); + if (!cs_bits) { + dev_err(&pdev->dev, "Could not auto probe CS lines\n"); + ret = -EINVAL; + goto put_master; + } + + num_cs = ilog2(cs_bits) + 1; + if (num_cs > SIFIVE_SPI_MAX_CS) { + dev_err(&pdev->dev, "Invalid number of spi slaves\n"); + ret = -EINVAL; + goto put_master; + } + + /* Define our master */ + master->dev.of_node = pdev->dev.of_node; + master->bus_num = pdev->id; + master->num_chipselect = num_cs; + master->mode_bits = SPI_CPHA | SPI_CPOL + | SPI_CS_HIGH | SPI_LSB_FIRST + | SPI_TX_DUAL | SPI_TX_QUAD + | SPI_RX_DUAL | SPI_RX_QUAD; + /* TODO: add driver support for bits_per_word < 8 + * we need to "left-align" the bits (unless SPI_LSB_FIRST) + */ + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS; + master->prepare_message = sifive_spi_prepare_message; + master->set_cs = sifive_spi_set_cs; + master->transfer_one = sifive_spi_transfer_one; + + pdev->dev.dma_mask = NULL; + /* Configure the SPI master hardware */ + sifive_spi_init(spi); + + /* Register for SPI Interrupt */ + ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0, + dev_name(&pdev->dev), spi); + if (ret) { + dev_err(&pdev->dev, "Unable to bind to interrupt\n"); + goto put_master; + } + + dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n", + irq, master->num_chipselect); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_master failed\n"); + goto put_master; + } + + return 0; + +put_master: + spi_master_put(master); + + return ret; +} + +static int sifive_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct sifive_spi *spi = spi_master_get_devdata(master); + + /* Disable all the interrupts just in case */ + 
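/*
 * Editor's sketch, not part of the patch: the poll-vs-interrupt decision made
 * in sifive_spi_prep_transfer() above, restated on its own. The magic 1600000
 * is just 8 / (5 * 10^-6): busy-wait only when a full FIFO of 8-bit frames
 * drains in under the assumed ~5 us context-switch cost.
 */
static bool sifive_spi_should_poll(unsigned int fifo_depth,
				   unsigned int speed_hz, unsigned int nbits)
{
	/* nbits is 1, 2 or 4 for single, dual and quad I/O lines */
	return 1600000U * fifo_depth <= speed_hz * nbits;
}

/*
 * Worked example with the default 8-entry FIFO and single-line I/O:
 * 1600000 * 8 = 12.8 MHz, so a 10 MHz transfer sleeps on the completion
 * while a 20 MHz transfer busy-polls the IP register instead.
 */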
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + return 0; +} + +static const struct of_device_id sifive_spi_of_match[] = { + { .compatible = "sifive,spi0", }, + {} +}; +MODULE_DEVICE_TABLE(of, sifive_spi_of_match); + +static struct platform_driver sifive_spi_driver = { + .probe = sifive_spi_probe, + .remove = sifive_spi_remove, + .driver = { + .name = SIFIVE_SPI_DRIVER_NAME, + .of_match_table = sifive_spi_of_match, + }, +}; +module_platform_driver(sifive_spi_driver); + +MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>"); +MODULE_DESCRIPTION("SiFive SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index 8daa24eec624..1b7eebb72c07 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -2,6 +2,9 @@ // Copyright (C) 2018 Spreadtrum Communications Inc. #include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dma/sprd-dma.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -9,6 +12,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/spi/spi.h> @@ -128,11 +132,28 @@ #define SPRD_SPI_DEFAULT_SOURCE 26000000 #define SPRD_SPI_MAX_SPEED_HZ 48000000 #define SPRD_SPI_AUTOSUSPEND_DELAY 100 +#define SPRD_SPI_DMA_STEP 8 + +enum sprd_spi_dma_channel { + SPRD_SPI_RX, + SPRD_SPI_TX, + SPRD_SPI_MAX, +}; + +struct sprd_spi_dma { + bool enable; + struct dma_chan *dma_chan[SPRD_SPI_MAX]; + enum dma_slave_buswidth width; + u32 fragmens_len; + u32 rx_len; +}; struct sprd_spi { void __iomem *base; + phys_addr_t phy_base; struct device *dev; struct clk *clk; + int irq; u32 src_clk; u32 hw_mode; u32 trans_len; @@ -141,6 +162,8 @@ struct sprd_spi { u32 hw_speed_hz; u32 len; int status; + struct sprd_spi_dma dma; + struct completion xfer_completion; const void *tx_buf; void *rx_buf; int (*read_bufs)(struct sprd_spi *ss, u32 len); @@ -380,7 +403,7 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t) { struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller); u32 trans_len = ss->trans_len, len; - int ret, write_size = 0; + int ret, write_size = 0, read_size = 0; while (trans_len) { len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE : @@ -416,19 +439,223 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t) goto complete; if (ss->trans_mode & SPRD_SPI_RX_MODE) - ss->read_bufs(ss, len); + read_size += ss->read_bufs(ss, len); trans_len -= len; } - ret = write_size; - + if (ss->trans_mode & SPRD_SPI_TX_MODE) + ret = write_size; + else + ret = read_size; complete: sprd_spi_enter_idle(ss); return ret; } +static void sprd_spi_irq_enable(struct sprd_spi *ss) +{ + u32 val; + + /* Clear interrupt status before enabling interrupt. */ + writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR, + ss->base + SPRD_SPI_INT_CLR); + /* Enable SPI interrupt only in DMA mode. 
*/ + val = readl_relaxed(ss->base + SPRD_SPI_INT_EN); + writel_relaxed(val | SPRD_SPI_TX_END_INT_EN | + SPRD_SPI_RX_END_INT_EN, + ss->base + SPRD_SPI_INT_EN); +} + +static void sprd_spi_irq_disable(struct sprd_spi *ss) +{ + writel_relaxed(0, ss->base + SPRD_SPI_INT_EN); +} + +static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable) +{ + u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2); + + if (enable) + val |= SPRD_SPI_DMA_EN; + else + val &= ~SPRD_SPI_DMA_EN; + + writel_relaxed(val, ss->base + SPRD_SPI_CTL2); +} + +static int sprd_spi_dma_submit(struct dma_chan *dma_chan, + struct dma_slave_config *c, + struct sg_table *sg, + enum dma_transfer_direction dir) +{ + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + unsigned long flags; + int ret; + + ret = dmaengine_slave_config(dma_chan, c); + if (ret < 0) + return ret; + + flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG, + SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT); + desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags); + if (!desc) + return -ENODEV; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + return dma_submit_error(cookie); + + dma_async_issue_pending(dma_chan); + + return 0; +} + +static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t) +{ + struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX]; + struct dma_slave_config config = { + .src_addr = ss->phy_base, + .src_addr_width = ss->dma.width, + .dst_addr_width = ss->dma.width, + .dst_maxburst = ss->dma.fragmens_len, + }; + int ret; + + ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM); + if (ret) + return ret; + + return ss->dma.rx_len; +} + +static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t) +{ + struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX]; + struct dma_slave_config config = { + .dst_addr = ss->phy_base, + .src_addr_width = ss->dma.width, + .dst_addr_width = ss->dma.width, + .src_maxburst = ss->dma.fragmens_len, + }; + int ret; + + ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV); + if (ret) + return ret; + + return t->len; +} + +static int sprd_spi_dma_request(struct sprd_spi *ss) +{ + ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn"); + if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) { + if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER) + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); + + dev_err(ss->dev, "request RX DMA channel failed!\n"); + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); + } + + ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn"); + if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) { + if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER) + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); + + dev_err(ss->dev, "request TX DMA channel failed!\n"); + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); + } + + return 0; +} + +static void sprd_spi_dma_release(struct sprd_spi *ss) +{ + if (ss->dma.dma_chan[SPRD_SPI_RX]) + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); + + if (ss->dma.dma_chan[SPRD_SPI_TX]) + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]); +} + +static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev, + struct spi_transfer *t) +{ + struct sprd_spi *ss = spi_master_get_devdata(sdev->master); + u32 trans_len = ss->trans_len; + int ret, write_size = 0; + + reinit_completion(&ss->xfer_completion); + sprd_spi_irq_enable(ss); + if (ss->trans_mode & 
SPRD_SPI_TX_MODE) { + write_size = sprd_spi_dma_tx_config(ss, t); + sprd_spi_set_tx_length(ss, trans_len); + + /* + * For our 3 wires mode or dual TX line mode, we need + * to request the controller to transfer. + */ + if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL) + sprd_spi_tx_req(ss); + } else { + sprd_spi_set_rx_length(ss, trans_len); + + /* + * For our 3 wires mode or dual TX line mode, we need + * to request the controller to read. + */ + if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL) + sprd_spi_rx_req(ss); + else + write_size = ss->write_bufs(ss, trans_len); + } + + if (write_size < 0) { + ret = write_size; + dev_err(ss->dev, "failed to write, ret = %d\n", ret); + goto trans_complete; + } + + if (ss->trans_mode & SPRD_SPI_RX_MODE) { + /* + * Set up the DMA receive data length, which must be an + * integral multiple of fragment length. But when the length + * of received data is less than fragment length, DMA can be + * configured to receive data according to the actual length + * of received data. + */ + ss->dma.rx_len = t->len > ss->dma.fragmens_len ? + (t->len - t->len % ss->dma.fragmens_len) : + t->len; + ret = sprd_spi_dma_rx_config(ss, t); + if (ret < 0) { + dev_err(&sdev->dev, + "failed to configure rx DMA, ret = %d\n", ret); + goto trans_complete; + } + } + + sprd_spi_dma_enable(ss, true); + wait_for_completion(&(ss->xfer_completion)); + + if (ss->trans_mode & SPRD_SPI_TX_MODE) + ret = write_size; + else + ret = ss->dma.rx_len; + +trans_complete: + sprd_spi_dma_enable(ss, false); + sprd_spi_enter_idle(ss); + sprd_spi_irq_disable(ss); + + return ret; +} + static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz) { /* @@ -514,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev, ss->trans_len = t->len; ss->read_bufs = sprd_spi_read_bufs_u8; ss->write_bufs = sprd_spi_write_bufs_u8; + ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP; break; case 16: ss->trans_len = t->len >> 1; ss->read_bufs = sprd_spi_read_bufs_u16; ss->write_bufs = sprd_spi_write_bufs_u16; + ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1; break; case 32: ss->trans_len = t->len >> 2; ss->read_bufs = sprd_spi_read_bufs_u32; ss->write_bufs = sprd_spi_write_bufs_u32; + ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2; break; default: return -EINVAL; @@ -561,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr, if (ret) goto setup_err; - ret = sprd_spi_txrx_bufs(sdev, t); + if (sctlr->can_dma(sctlr, sdev, t)) + ret = sprd_spi_dma_txrx_bufs(sdev, t); + else + ret = sprd_spi_txrx_bufs(sdev, t); + if (ret == t->len) ret = 0; else if (ret >= 0) @@ -573,6 +810,53 @@ setup_err: return ret; } +static irqreturn_t sprd_spi_handle_irq(int irq, void *data) +{ + struct sprd_spi *ss = (struct sprd_spi *)data; + u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS); + + if (val & SPRD_SPI_MASK_TX_END) { + writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR); + if (!(ss->trans_mode & SPRD_SPI_RX_MODE)) + complete(&ss->xfer_completion); + + return IRQ_HANDLED; + } + + if (val & SPRD_SPI_MASK_RX_END) { + writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR); + if (ss->dma.rx_len < ss->len) { + ss->rx_buf += ss->dma.rx_len; + ss->dma.rx_len += + ss->read_bufs(ss, ss->len - ss->dma.rx_len); + } + complete(&ss->xfer_completion); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int 
sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss) +{ + int ret; + + ss->irq = platform_get_irq(pdev, 0); + if (ss->irq < 0) { + dev_err(&pdev->dev, "failed to get irq resource\n"); + return ss->irq; + } + + ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq, + 0, pdev->name, ss); + if (ret) + dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n", + ss->irq, ret); + + return ret; +} + static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) { struct clk *clk_spi, *clk_parent; @@ -603,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) return 0; } +static bool sprd_spi_can_dma(struct spi_controller *sctlr, + struct spi_device *spi, struct spi_transfer *t) +{ + struct sprd_spi *ss = spi_controller_get_devdata(sctlr); + + return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE); +} + +static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss) +{ + int ret; + + ret = sprd_spi_dma_request(ss); + if (ret) { + if (ret == -EPROBE_DEFER) + return ret; + + dev_warn(&pdev->dev, + "failed to request dma, enter no dma mode, ret = %d\n", + ret); + + return 0; + } + + ss->dma.enable = true; + + return 0; +} + static int sprd_spi_probe(struct platform_device *pdev) { struct spi_controller *sctlr; @@ -623,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev) goto free_controller; } + ss->phy_base = res->start; ss->dev = &pdev->dev; sctlr->dev.of_node = pdev->dev.of_node; sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL; sctlr->bus_num = pdev->id; sctlr->set_cs = sprd_spi_chipselect; sctlr->transfer_one = sprd_spi_transfer_one; + sctlr->can_dma = sprd_spi_can_dma; sctlr->auto_runtime_pm = true; sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1, SPRD_SPI_MAX_SPEED_HZ); + init_completion(&ss->xfer_completion); platform_set_drvdata(pdev, sctlr); ret = sprd_spi_clk_init(pdev, ss); if (ret) goto free_controller; - ret = clk_prepare_enable(ss->clk); + ret = sprd_spi_irq_init(pdev, ss); if (ret) goto free_controller; + ret = sprd_spi_dma_init(pdev, ss); + if (ret) + goto free_controller; + + ret = clk_prepare_enable(ss->clk); + if (ret) + goto release_dma; + ret = pm_runtime_set_active(&pdev->dev); if (ret < 0) goto disable_clk; @@ -670,6 +994,8 @@ err_rpm_put: pm_runtime_disable(&pdev->dev); disable_clk: clk_disable_unprepare(ss->clk); +release_dma: + sprd_spi_dma_release(ss); free_controller: spi_controller_put(sctlr); @@ -688,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev) return ret; } + spi_controller_suspend(sctlr); + + if (ss->dma.enable) + sprd_spi_dma_release(ss); clk_disable_unprepare(ss->clk); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -700,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev) struct spi_controller *sctlr = dev_get_drvdata(dev); struct sprd_spi *ss = spi_controller_get_devdata(sctlr); + if (ss->dma.enable) + sprd_spi_dma_release(ss); + clk_disable_unprepare(ss->clk); return 0; @@ -715,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev) if (ret) return ret; - return 0; + if (!ss->dma.enable) + return 0; + + ret = sprd_spi_dma_request(ss); + if (ret) + clk_disable_unprepare(ss->clk); + + return ret; } static const struct dev_pm_ops sprd_spi_pm_ops = { diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index ad1e55d3d5d5..4186ed20d796 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -1,23 +1,10 
@@ -/* - * STMicroelectronics STM32 SPI Controller driver (master mode only) - * - * Copyright (C) 2017, STMicroelectronics - All Rights Reserved - * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics. - * - * License terms: GPL V2.0. - * - * spi_stm32 driver is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * spi_stm32 driver is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along with - * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>. - */ +// SPDX-License-Identifier: GPL-2.0 +// +// STMicroelectronics STM32 SPI Controller driver (master mode only) +// +// Copyright (C) 2017, STMicroelectronics - All Rights Reserved +// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics. + #include <linux/debugfs.h> #include <linux/clk.h> #include <linux/delay.h> @@ -33,99 +20,251 @@ #define DRIVER_NAME "spi_stm32" -/* STM32 SPI registers */ -#define STM32_SPI_CR1 0x00 -#define STM32_SPI_CR2 0x04 -#define STM32_SPI_CFG1 0x08 -#define STM32_SPI_CFG2 0x0C -#define STM32_SPI_IER 0x10 -#define STM32_SPI_SR 0x14 -#define STM32_SPI_IFCR 0x18 -#define STM32_SPI_TXDR 0x20 -#define STM32_SPI_RXDR 0x30 -#define STM32_SPI_I2SCFGR 0x50 - -/* STM32_SPI_CR1 bit fields */ -#define SPI_CR1_SPE BIT(0) -#define SPI_CR1_MASRX BIT(8) -#define SPI_CR1_CSTART BIT(9) -#define SPI_CR1_CSUSP BIT(10) -#define SPI_CR1_HDDIR BIT(11) -#define SPI_CR1_SSI BIT(12) - -/* STM32_SPI_CR2 bit fields */ -#define SPI_CR2_TSIZE_SHIFT 0 -#define SPI_CR2_TSIZE GENMASK(15, 0) - -/* STM32_SPI_CFG1 bit fields */ -#define SPI_CFG1_DSIZE_SHIFT 0 -#define SPI_CFG1_DSIZE GENMASK(4, 0) -#define SPI_CFG1_FTHLV_SHIFT 5 -#define SPI_CFG1_FTHLV GENMASK(8, 5) -#define SPI_CFG1_RXDMAEN BIT(14) -#define SPI_CFG1_TXDMAEN BIT(15) -#define SPI_CFG1_MBR_SHIFT 28 -#define SPI_CFG1_MBR GENMASK(30, 28) -#define SPI_CFG1_MBR_MIN 0 -#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) - -/* STM32_SPI_CFG2 bit fields */ -#define SPI_CFG2_MIDI_SHIFT 4 -#define SPI_CFG2_MIDI GENMASK(7, 4) -#define SPI_CFG2_COMM_SHIFT 17 -#define SPI_CFG2_COMM GENMASK(18, 17) -#define SPI_CFG2_SP_SHIFT 19 -#define SPI_CFG2_SP GENMASK(21, 19) -#define SPI_CFG2_MASTER BIT(22) -#define SPI_CFG2_LSBFRST BIT(23) -#define SPI_CFG2_CPHA BIT(24) -#define SPI_CFG2_CPOL BIT(25) -#define SPI_CFG2_SSM BIT(26) -#define SPI_CFG2_AFCNTR BIT(31) - -/* STM32_SPI_IER bit fields */ -#define SPI_IER_RXPIE BIT(0) -#define SPI_IER_TXPIE BIT(1) -#define SPI_IER_DXPIE BIT(2) -#define SPI_IER_EOTIE BIT(3) -#define SPI_IER_TXTFIE BIT(4) -#define SPI_IER_OVRIE BIT(6) -#define SPI_IER_MODFIE BIT(9) -#define SPI_IER_ALL GENMASK(10, 0) - -/* STM32_SPI_SR bit fields */ -#define SPI_SR_RXP BIT(0) -#define SPI_SR_TXP BIT(1) -#define SPI_SR_EOT BIT(3) -#define SPI_SR_OVR BIT(6) -#define SPI_SR_MODF BIT(9) -#define SPI_SR_SUSP BIT(11) -#define SPI_SR_RXPLVL_SHIFT 13 -#define SPI_SR_RXPLVL GENMASK(14, 13) -#define SPI_SR_RXWNE BIT(15) - -/* STM32_SPI_IFCR bit fields */ -#define SPI_IFCR_ALL GENMASK(11, 3) - -/* STM32_SPI_I2SCFGR bit fields */ -#define SPI_I2SCFGR_I2SMOD BIT(0) - -/* SPI Master Baud Rate min/max divisor */ -#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN) -#define 
SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX) - -/* SPI Communication mode */ +/* STM32F4 SPI registers */ +#define STM32F4_SPI_CR1 0x00 +#define STM32F4_SPI_CR2 0x04 +#define STM32F4_SPI_SR 0x08 +#define STM32F4_SPI_DR 0x0C +#define STM32F4_SPI_I2SCFGR 0x1C + +/* STM32F4_SPI_CR1 bit fields */ +#define STM32F4_SPI_CR1_CPHA BIT(0) +#define STM32F4_SPI_CR1_CPOL BIT(1) +#define STM32F4_SPI_CR1_MSTR BIT(2) +#define STM32F4_SPI_CR1_BR_SHIFT 3 +#define STM32F4_SPI_CR1_BR GENMASK(5, 3) +#define STM32F4_SPI_CR1_SPE BIT(6) +#define STM32F4_SPI_CR1_LSBFRST BIT(7) +#define STM32F4_SPI_CR1_SSI BIT(8) +#define STM32F4_SPI_CR1_SSM BIT(9) +#define STM32F4_SPI_CR1_RXONLY BIT(10) +#define STM32F4_SPI_CR1_DFF BIT(11) +#define STM32F4_SPI_CR1_CRCNEXT BIT(12) +#define STM32F4_SPI_CR1_CRCEN BIT(13) +#define STM32F4_SPI_CR1_BIDIOE BIT(14) +#define STM32F4_SPI_CR1_BIDIMODE BIT(15) +#define STM32F4_SPI_CR1_BR_MIN 0 +#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3) + +/* STM32F4_SPI_CR2 bit fields */ +#define STM32F4_SPI_CR2_RXDMAEN BIT(0) +#define STM32F4_SPI_CR2_TXDMAEN BIT(1) +#define STM32F4_SPI_CR2_SSOE BIT(2) +#define STM32F4_SPI_CR2_FRF BIT(4) +#define STM32F4_SPI_CR2_ERRIE BIT(5) +#define STM32F4_SPI_CR2_RXNEIE BIT(6) +#define STM32F4_SPI_CR2_TXEIE BIT(7) + +/* STM32F4_SPI_SR bit fields */ +#define STM32F4_SPI_SR_RXNE BIT(0) +#define STM32F4_SPI_SR_TXE BIT(1) +#define STM32F4_SPI_SR_CHSIDE BIT(2) +#define STM32F4_SPI_SR_UDR BIT(3) +#define STM32F4_SPI_SR_CRCERR BIT(4) +#define STM32F4_SPI_SR_MODF BIT(5) +#define STM32F4_SPI_SR_OVR BIT(6) +#define STM32F4_SPI_SR_BSY BIT(7) +#define STM32F4_SPI_SR_FRE BIT(8) + +/* STM32F4_SPI_I2SCFGR bit fields */ +#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11) + +/* STM32F4 SPI Baud Rate min/max divisor */ +#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN) +#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX) + +/* STM32H7 SPI registers */ +#define STM32H7_SPI_CR1 0x00 +#define STM32H7_SPI_CR2 0x04 +#define STM32H7_SPI_CFG1 0x08 +#define STM32H7_SPI_CFG2 0x0C +#define STM32H7_SPI_IER 0x10 +#define STM32H7_SPI_SR 0x14 +#define STM32H7_SPI_IFCR 0x18 +#define STM32H7_SPI_TXDR 0x20 +#define STM32H7_SPI_RXDR 0x30 +#define STM32H7_SPI_I2SCFGR 0x50 + +/* STM32H7_SPI_CR1 bit fields */ +#define STM32H7_SPI_CR1_SPE BIT(0) +#define STM32H7_SPI_CR1_MASRX BIT(8) +#define STM32H7_SPI_CR1_CSTART BIT(9) +#define STM32H7_SPI_CR1_CSUSP BIT(10) +#define STM32H7_SPI_CR1_HDDIR BIT(11) +#define STM32H7_SPI_CR1_SSI BIT(12) + +/* STM32H7_SPI_CR2 bit fields */ +#define STM32H7_SPI_CR2_TSIZE_SHIFT 0 +#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0) + +/* STM32H7_SPI_CFG1 bit fields */ +#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0 +#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0) +#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5 +#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5) +#define STM32H7_SPI_CFG1_RXDMAEN BIT(14) +#define STM32H7_SPI_CFG1_TXDMAEN BIT(15) +#define STM32H7_SPI_CFG1_MBR_SHIFT 28 +#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28) +#define STM32H7_SPI_CFG1_MBR_MIN 0 +#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) + +/* STM32H7_SPI_CFG2 bit fields */ +#define STM32H7_SPI_CFG2_MIDI_SHIFT 4 +#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4) +#define STM32H7_SPI_CFG2_COMM_SHIFT 17 +#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17) +#define STM32H7_SPI_CFG2_SP_SHIFT 19 +#define STM32H7_SPI_CFG2_SP GENMASK(21, 19) +#define STM32H7_SPI_CFG2_MASTER BIT(22) +#define STM32H7_SPI_CFG2_LSBFRST BIT(23) +#define STM32H7_SPI_CFG2_CPHA BIT(24) +#define STM32H7_SPI_CFG2_CPOL BIT(25) +#define STM32H7_SPI_CFG2_SSM BIT(26) 
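/*
 * Editor's sketch, not part of the patch: on both variants the baud rate is a
 * 3-bit power-of-two divisor field (BR on STM32F4, MBR on STM32H7), so the
 * reachable SCK range is clk_rate/2 down to clk_rate/256, as the *_DIV_MIN
 * and *_DIV_MAX macros around here encode. The in-tree selection logic lives
 * in stm32_spi_prepare_mbr() further down; this helper only restates the
 * arithmetic (order_base_2() is from <linux/log2.h>).
 */
static int stm32_spi_pick_div_field(unsigned long clk_rate, u32 speed_hz)
{
	u32 div = DIV_ROUND_UP(clk_rate, speed_hz);

	if (div < 2 || div > 256)
		return -EINVAL;

	/* first power of two >= div; field value 0 => /2 ... 7 => /256 */
	return order_base_2(div) - 1;
}

/*
 * Example: clk_rate = 100 MHz, requested 10 MHz => div = 10, rounded up to 16,
 * field value 3, effective SCK = 6.25 MHz (never faster than requested).
 */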
+#define STM32H7_SPI_CFG2_AFCNTR BIT(31) + +/* STM32H7_SPI_IER bit fields */ +#define STM32H7_SPI_IER_RXPIE BIT(0) +#define STM32H7_SPI_IER_TXPIE BIT(1) +#define STM32H7_SPI_IER_DXPIE BIT(2) +#define STM32H7_SPI_IER_EOTIE BIT(3) +#define STM32H7_SPI_IER_TXTFIE BIT(4) +#define STM32H7_SPI_IER_OVRIE BIT(6) +#define STM32H7_SPI_IER_MODFIE BIT(9) +#define STM32H7_SPI_IER_ALL GENMASK(10, 0) + +/* STM32H7_SPI_SR bit fields */ +#define STM32H7_SPI_SR_RXP BIT(0) +#define STM32H7_SPI_SR_TXP BIT(1) +#define STM32H7_SPI_SR_EOT BIT(3) +#define STM32H7_SPI_SR_OVR BIT(6) +#define STM32H7_SPI_SR_MODF BIT(9) +#define STM32H7_SPI_SR_SUSP BIT(11) +#define STM32H7_SPI_SR_RXPLVL_SHIFT 13 +#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13) +#define STM32H7_SPI_SR_RXWNE BIT(15) + +/* STM32H7_SPI_IFCR bit fields */ +#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3) + +/* STM32H7_SPI_I2SCFGR bit fields */ +#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0) + +/* STM32H7 SPI Master Baud Rate min/max divisor */ +#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN) +#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX) + +/* STM32H7 SPI Communication mode */ +#define STM32H7_SPI_FULL_DUPLEX 0 +#define STM32H7_SPI_SIMPLEX_TX 1 +#define STM32H7_SPI_SIMPLEX_RX 2 +#define STM32H7_SPI_HALF_DUPLEX 3 + +/* SPI Communication type */ #define SPI_FULL_DUPLEX 0 #define SPI_SIMPLEX_TX 1 #define SPI_SIMPLEX_RX 2 -#define SPI_HALF_DUPLEX 3 +#define SPI_3WIRE_TX 3 +#define SPI_3WIRE_RX 4 #define SPI_1HZ_NS 1000000000 +/* + * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers + * without fifo buffers. + */ +#define SPI_DMA_MIN_BYTES 16 + +/** + * stm32_spi_reg - stm32 SPI register & bitfield desc + * @reg: register offset + * @mask: bitfield mask + * @shift: left shift + */ +struct stm32_spi_reg { + int reg; + int mask; + int shift; +}; + +/** + * stm32_spi_regspec - stm32 registers definition, compatible dependent data + * en: enable register and SPI enable bit + * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit + * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit + * cpol: clock polarity register and polarity bit + * cpha: clock phase register and phase bit + * lsb_first: LSB transmitted first register and bit + * br: baud rate register and bitfields + * rx: SPI RX data register + * tx: SPI TX data register + */ +struct stm32_spi_regspec { + const struct stm32_spi_reg en; + const struct stm32_spi_reg dma_rx_en; + const struct stm32_spi_reg dma_tx_en; + const struct stm32_spi_reg cpol; + const struct stm32_spi_reg cpha; + const struct stm32_spi_reg lsb_first; + const struct stm32_spi_reg br; + const struct stm32_spi_reg rx; + const struct stm32_spi_reg tx; +}; + +struct stm32_spi; + +/** + * stm32_spi_cfg - stm32 compatible configuration data + * @regs: registers descriptions + * @get_fifo_size: routine to get fifo size + * @get_bpw_mask: routine to get bits per word mask + * @disable: routine to disable controller + * @config: routine to configure controller as SPI Master + * @set_bpw: routine to configure registers to for bits per word + * @set_mode: routine to configure registers to desired mode + * @set_data_idleness: optional routine to configure registers to desired idle + * time between frames (if driver has this functionality) + * set_number_of_data: optional routine to configure registers to desired + * number of data (if driver has this functionality) + * @can_dma: routine to determine if the transfer is eligible for DMA use + * @transfer_one_dma_start: routine 
to start transfer a single spi_transfer + * using DMA + * @dma_rx cb: routine to call after DMA RX channel operation is complete + * @dma_tx cb: routine to call after DMA TX channel operation is complete + * @transfer_one_irq: routine to configure interrupts for driver + * @irq_handler_event: Interrupt handler for SPI controller events + * @irq_handler_thread: thread of interrupt handler for SPI controller + * @baud_rate_div_min: minimum baud rate divisor + * @baud_rate_div_max: maximum baud rate divisor + * @has_fifo: boolean to know if fifo is used for driver + * @has_startbit: boolean to know if start bit is used to start transfer + */ +struct stm32_spi_cfg { + const struct stm32_spi_regspec *regs; + int (*get_fifo_size)(struct stm32_spi *spi); + int (*get_bpw_mask)(struct stm32_spi *spi); + void (*disable)(struct stm32_spi *spi); + int (*config)(struct stm32_spi *spi); + void (*set_bpw)(struct stm32_spi *spi); + int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type); + void (*set_data_idleness)(struct stm32_spi *spi, u32 length); + int (*set_number_of_data)(struct stm32_spi *spi, u32 length); + void (*transfer_one_dma_start)(struct stm32_spi *spi); + void (*dma_rx_cb)(void *data); + void (*dma_tx_cb)(void *data); + int (*transfer_one_irq)(struct stm32_spi *spi); + irqreturn_t (*irq_handler_event)(int irq, void *dev_id); + irqreturn_t (*irq_handler_thread)(int irq, void *dev_id); + unsigned int baud_rate_div_min; + unsigned int baud_rate_div_max; + bool has_fifo; +}; + /** * struct stm32_spi - private data of the SPI controller * @dev: driver model representation of the controller * @master: controller master interface + * @cfg: compatible configuration data * @base: virtual memory area * @clk: hw kernel clock feeding the SPI clock generator * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator @@ -151,6 +290,7 @@ struct stm32_spi { struct device *dev; struct spi_master *master; + const struct stm32_spi_cfg *cfg; void __iomem *base; struct clk *clk; u32 clk_rate; @@ -176,6 +316,40 @@ struct stm32_spi { dma_addr_t phys_addr; }; +static const struct stm32_spi_regspec stm32f4_spi_regspec = { + .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE }, + + .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN }, + .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN }, + + .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL }, + .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA }, + .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST }, + .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT }, + + .rx = { STM32F4_SPI_DR }, + .tx = { STM32F4_SPI_DR }, +}; + +static const struct stm32_spi_regspec stm32h7_spi_regspec = { + /* SPI data transfer is enabled but spi_ker_ck is idle. + * CFG1 and CFG2 registers are write protected when SPE is enabled. 
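/*
 * Editor's sketch, not part of the patch: the point of the {reg, mask, shift}
 * descriptors is that a given control bit lives in a different register per
 * variant (CPOL sits in CR1 on STM32F4 but in CFG2 on STM32H7), while common
 * code only ever dereferences spi->cfg->regs, as stm32_spi_prepare_msg() and
 * stm32_spi_set_mbr() do later in this patch. The helper name below is purely
 * illustrative, not something the driver defines.
 */
static void stm32_spi_update_field(struct stm32_spi *spi,
				   const struct stm32_spi_reg *field, u32 val)
{
	u32 tmp = readl_relaxed(spi->base + field->reg);

	tmp &= ~field->mask;
	tmp |= (val << field->shift) & field->mask;
	writel_relaxed(tmp, spi->base + field->reg);
}

/* e.g. stm32_spi_update_field(spi, &spi->cfg->regs->br, mbrdiv); */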
+ */ + .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE }, + + .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN }, + .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN }, + + .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL }, + .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA }, + .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST }, + .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR, + STM32H7_SPI_CFG1_MBR_SHIFT }, + + .rx = { STM32H7_SPI_RXDR }, + .tx = { STM32H7_SPI_TXDR }, +}; + static inline void stm32_spi_set_bits(struct stm32_spi *spi, u32 offset, u32 bits) { @@ -191,22 +365,22 @@ static inline void stm32_spi_clr_bits(struct stm32_spi *spi, } /** - * stm32_spi_get_fifo_size - Return fifo size + * stm32h7_spi_get_fifo_size - Return fifo size * @spi: pointer to the spi controller data structure */ -static int stm32_spi_get_fifo_size(struct stm32_spi *spi) +static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi) { unsigned long flags; u32 count = 0; spin_lock_irqsave(&spi->lock, flags); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); - while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP) - writeb_relaxed(++count, spi->base + STM32_SPI_TXDR); + while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP) + writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR); - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); spin_unlock_irqrestore(&spi->lock, flags); @@ -216,10 +390,20 @@ static int stm32_spi_get_fifo_size(struct stm32_spi *spi) } /** - * stm32_spi_get_bpw_mask - Return bits per word mask + * stm32f4_spi_get_bpw_mask - Return bits per word mask * @spi: pointer to the spi controller data structure */ -static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) +static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi) +{ + dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n"); + return SPI_BPW_MASK(8) | SPI_BPW_MASK(16); +} + +/** + * stm32h7_spi_get_bpw_mask - Return bits per word mask + * @spi: pointer to the spi controller data structure + */ +static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi) { unsigned long flags; u32 cfg1, max_bpw; @@ -230,10 +414,11 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) * The most significant bit at DSIZE bit field is reserved when the * maximum data size of periperal instances is limited to 16-bit */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE); + stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE); - cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1); - max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT; + cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1); + max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >> + STM32H7_SPI_CFG1_DSIZE_SHIFT; max_bpw += 1; spin_unlock_irqrestore(&spi->lock, flags); @@ -244,13 +429,16 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) } /** - * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value + * stm32_spi_prepare_mbr - Determine baud rate divisor value * @spi: pointer to the spi controller data structure * @speed_hz: requested speed + * @min_div: minimum baud rate divisor + * @max_div: maximum baud rate divisor * - * Return SPI_CFG1.MBR value in case of success or -EINVAL + * Return baud rate divisor value in case of success or -EINVAL */ -static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) +static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, 
+ u32 min_div, u32 max_div) { u32 div, mbrdiv; @@ -263,8 +451,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) * no need to check it there. * However, we need to ensure the following calculations. */ - if (div < SPI_MBR_DIV_MIN || - div > SPI_MBR_DIV_MAX) + if ((div < min_div) || (div > max_div)) return -EINVAL; /* Determine the first power of 2 greater than or equal to div */ @@ -279,10 +466,10 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) } /** - * stm32_spi_prepare_fthlv - Determine FIFO threshold level + * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level * @spi: pointer to the spi controller data structure */ -static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) { u32 fthlv, half_fifo; @@ -306,32 +493,62 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) } /** - * stm32_spi_write_txfifo - Write bytes in Transmit Data Register + * stm32f4_spi_write_tx - Write bytes to Transmit Data Register * @spi: pointer to the spi controller data structure * * Read from tx_buf depends on remaining bytes to avoid to read beyond * tx_buf end. */ -static void stm32_spi_write_txfifo(struct stm32_spi *spi) +static void stm32f4_spi_write_tx(struct stm32_spi *spi) +{ + if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & + STM32F4_SPI_SR_TXE)) { + u32 offs = spi->cur_xferlen - spi->tx_len; + + if (spi->cur_bpw == 16) { + const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); + + writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR); + spi->tx_len -= sizeof(u16); + } else { + const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); + + writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR); + spi->tx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len); +} + +/** + * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register + * @spi: pointer to the spi controller data structure + * + * Read from tx_buf depends on remaining bytes to avoid to read beyond + * tx_buf end. + */ +static void stm32h7_spi_write_txfifo(struct stm32_spi *spi) { while ((spi->tx_len > 0) && - (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) { + (readl_relaxed(spi->base + STM32H7_SPI_SR) & + STM32H7_SPI_SR_TXP)) { u32 offs = spi->cur_xferlen - spi->tx_len; if (spi->tx_len >= sizeof(u32)) { const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs); - writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR); + writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u32); } else if (spi->tx_len >= sizeof(u16)) { const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); - writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR); + writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u16); } else { const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); - writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR); + writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u8); } } @@ -340,43 +557,74 @@ static void stm32_spi_write_txfifo(struct stm32_spi *spi) } /** - * stm32_spi_read_rxfifo - Read bytes in Receive Data Register + * stm32f4_spi_read_rx - Read bytes from Receive Data Register + * @spi: pointer to the spi controller data structure + * + * Write in rx_buf depends on remaining bytes to avoid to write beyond + * rx_buf end. 
+ */ +static void stm32f4_spi_read_rx(struct stm32_spi *spi) +{ + if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & + STM32F4_SPI_SR_RXNE)) { + u32 offs = spi->cur_xferlen - spi->rx_len; + + if (spi->cur_bpw == 16) { + u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); + + *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR); + spi->rx_len -= sizeof(u16); + } else { + u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); + + *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR); + spi->rx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len); +} + +/** + * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register * @spi: pointer to the spi controller data structure * * Write in rx_buf depends on remaining bytes to avoid to write beyond * rx_buf end. */ -static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) +static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush) { - u32 sr = readl_relaxed(spi->base + STM32_SPI_SR); - u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; + u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >> + STM32H7_SPI_SR_RXPLVL_SHIFT; while ((spi->rx_len > 0) && - ((sr & SPI_SR_RXP) || - (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) { + ((sr & STM32H7_SPI_SR_RXP) || + (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) { u32 offs = spi->cur_xferlen - spi->rx_len; if ((spi->rx_len >= sizeof(u32)) || - (flush && (sr & SPI_SR_RXWNE))) { + (flush && (sr & STM32H7_SPI_SR_RXWNE))) { u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs); - *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u32); } else if ((spi->rx_len >= sizeof(u16)) || (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) { u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); - *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u16); } else { u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); - *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u8); } - sr = readl_relaxed(spi->base + STM32_SPI_SR); - rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >> + STM32H7_SPI_SR_RXPLVL_SHIFT; } dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__, @@ -386,26 +634,76 @@ static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) /** * stm32_spi_enable - Enable SPI controller * @spi: pointer to the spi controller data structure - * - * SPI data transfer is enabled but spi_ker_ck is idle. - * SPI_CFG1 and SPI_CFG2 are now write protected. 
*/ static void stm32_spi_enable(struct stm32_spi *spi) { dev_dbg(spi->dev, "enable controller\n"); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_set_bits(spi, spi->cfg->regs->en.reg, + spi->cfg->regs->en.mask); } /** - * stm32_spi_disable - Disable SPI controller + * stm32f4_spi_disable - Disable SPI controller + * @spi: pointer to the spi controller data structure + */ +static void stm32f4_spi_disable(struct stm32_spi *spi) +{ + unsigned long flags; + u32 sr; + + dev_dbg(spi->dev, "disable controller\n"); + + spin_lock_irqsave(&spi->lock, flags); + + if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) & + STM32F4_SPI_CR1_SPE)) { + spin_unlock_irqrestore(&spi->lock, flags); + return; + } + + /* Disable interrupts */ + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE | + STM32F4_SPI_CR2_RXNEIE | + STM32F4_SPI_CR2_ERRIE); + + /* Wait until BSY = 0 */ + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR, + sr, !(sr & STM32F4_SPI_SR_BSY), + 10, 100000) < 0) { + dev_warn(spi->dev, "disabling condition timeout\n"); + } + + if (spi->cur_usedma && spi->dma_tx) + dmaengine_terminate_all(spi->dma_tx); + if (spi->cur_usedma && spi->dma_rx) + dmaengine_terminate_all(spi->dma_rx); + + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE); + + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN | + STM32F4_SPI_CR2_RXDMAEN); + + /* Sequence to clear OVR flag */ + readl_relaxed(spi->base + STM32F4_SPI_DR); + readl_relaxed(spi->base + STM32F4_SPI_SR); + + spin_unlock_irqrestore(&spi->lock, flags); +} + +/** + * stm32h7_spi_disable - Disable SPI controller * @spi: pointer to the spi controller data structure * * RX-Fifo is flushed when SPI controller is disabled. To prevent any data - * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in + * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in * RX-Fifo. + * Normally, if TSIZE has been configured, we should relax the hardware at the + * reception of the EOT interrupt. But in case of error, EOT will not be + * raised. So the subsystem unprepare_message call allows us to properly + * complete the transfer from an hardware point of view. 
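/*
 * Editor's note, not part of the patch: as the comment above says, a faulted
 * transfer may never raise EOT, so quiescing the controller cannot rely on
 * the interrupt path alone. The SPI core calls the controller's
 * ->unprepare_message() hook after each message, and that is where the
 * per-variant disable routine gets invoked. Illustrative shape only; the
 * actual hook in the driver may be named differently:
 */
static int example_unprepare_msg(struct spi_master *master,
				 struct spi_message *msg)
{
	struct stm32_spi *spi = spi_master_get_devdata(master);

	/* stm32f4_spi_disable() or stm32h7_spi_disable() via the cfg table */
	spi->cfg->disable(spi);

	return 0;
}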
*/ -static void stm32_spi_disable(struct stm32_spi *spi) +static void stm32h7_spi_disable(struct stm32_spi *spi) { unsigned long flags; u32 cr1, sr; @@ -414,23 +712,23 @@ static void stm32_spi_disable(struct stm32_spi *spi) spin_lock_irqsave(&spi->lock, flags); - cr1 = readl_relaxed(spi->base + STM32_SPI_CR1); + cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1); - if (!(cr1 & SPI_CR1_SPE)) { + if (!(cr1 & STM32H7_SPI_CR1_SPE)) { spin_unlock_irqrestore(&spi->lock, flags); return; } /* Wait on EOT or suspend the flow */ - if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR, - sr, !(sr & SPI_SR_EOT), + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR, + sr, !(sr & STM32H7_SPI_SR_EOT), 10, 100000) < 0) { - if (cr1 & SPI_CR1_CSTART) { - writel_relaxed(cr1 | SPI_CR1_CSUSP, - spi->base + STM32_SPI_CR1); + if (cr1 & STM32H7_SPI_CR1_CSTART) { + writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP, + spi->base + STM32H7_SPI_CR1); if (readl_relaxed_poll_timeout_atomic( - spi->base + STM32_SPI_SR, - sr, !(sr & SPI_SR_SUSP), + spi->base + STM32H7_SPI_SR, + sr, !(sr & STM32H7_SPI_SR_SUSP), 10, 100000) < 0) dev_warn(spi->dev, "Suspend request timeout\n"); @@ -438,21 +736,21 @@ static void stm32_spi_disable(struct stm32_spi *spi) } if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0)) - stm32_spi_read_rxfifo(spi, true); + stm32h7_spi_read_rxfifo(spi, true); - if (spi->cur_usedma && spi->tx_buf) + if (spi->cur_usedma && spi->dma_tx) dmaengine_terminate_all(spi->dma_tx); - if (spi->cur_usedma && spi->rx_buf) + if (spi->cur_usedma && spi->dma_rx) dmaengine_terminate_all(spi->dma_rx); - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); - stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN | - SPI_CFG1_RXDMAEN); + stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN | + STM32H7_SPI_CFG1_RXDMAEN); /* Disable interrupts and clear status flags */ - writel_relaxed(0, spi->base + STM32_SPI_IER); - writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR); + writel_relaxed(0, spi->base + STM32H7_SPI_IER); + writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); } @@ -460,26 +758,136 @@ static void stm32_spi_disable(struct stm32_spi *spi) /** * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use * - * If the current transfer size is greater than fifo size, use DMA. + * If driver has fifo and the current transfer size is greater than fifo size, + * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes. */ static bool stm32_spi_can_dma(struct spi_master *master, struct spi_device *spi_dev, struct spi_transfer *transfer) { + unsigned int dma_size; struct stm32_spi *spi = spi_master_get_devdata(master); + if (spi->cfg->has_fifo) + dma_size = spi->fifo_size; + else + dma_size = SPI_DMA_MIN_BYTES; + dev_dbg(spi->dev, "%s: %s\n", __func__, - (transfer->len > spi->fifo_size) ? "true" : "false"); + (transfer->len > dma_size) ? 
"true" : "false"); + + return (transfer->len > dma_size); +} + +/** + * stm32f4_spi_irq_event - Interrupt handler for SPI controller events + * @irq: interrupt line + * @dev_id: SPI controller master interface + */ +static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct stm32_spi *spi = spi_master_get_devdata(master); + u32 sr, mask = 0; + unsigned long flags; + bool end = false; + + spin_lock_irqsave(&spi->lock, flags); + + sr = readl_relaxed(spi->base + STM32F4_SPI_SR); + /* + * BSY flag is not handled in interrupt but it is normal behavior when + * this flag is set. + */ + sr &= ~STM32F4_SPI_SR_BSY; + + if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX || + spi->cur_comm == SPI_3WIRE_TX)) { + /* OVR flag shouldn't be handled for TX only mode */ + sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE; + mask |= STM32F4_SPI_SR_TXE; + } + + if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) { + /* TXE flag is set and is handled when RXNE flag occurs */ + sr &= ~STM32F4_SPI_SR_TXE; + mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR; + } + + if (!(sr & mask)) { + dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_NONE; + } + + if (sr & STM32F4_SPI_SR_OVR) { + dev_warn(spi->dev, "Overrun: received value discarded\n"); + + /* Sequence to clear OVR flag */ + readl_relaxed(spi->base + STM32F4_SPI_DR); + readl_relaxed(spi->base + STM32F4_SPI_SR); + + /* + * If overrun is detected, it means that something went wrong, + * so stop the current transfer. Transfer can wait for next + * RXNE but DR is already read and end never happens. + */ + end = true; + goto end_irq; + } + + if (sr & STM32F4_SPI_SR_TXE) { + if (spi->tx_buf) + stm32f4_spi_write_tx(spi); + if (spi->tx_len == 0) + end = true; + } + + if (sr & STM32F4_SPI_SR_RXNE) { + stm32f4_spi_read_rx(spi); + if (spi->rx_len == 0) + end = true; + else /* Load data for discontinuous mode */ + stm32f4_spi_write_tx(spi); + } + +end_irq: + if (end) { + /* Immediately disable interrupts to do not generate new one */ + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, + STM32F4_SPI_CR2_TXEIE | + STM32F4_SPI_CR2_RXNEIE | + STM32F4_SPI_CR2_ERRIE); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_WAKE_THREAD; + } + + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_HANDLED; +} + +/** + * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller + * @irq: interrupt line + * @dev_id: SPI controller master interface + */ +static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct stm32_spi *spi = spi_master_get_devdata(master); + + spi_finalize_current_transfer(master); + stm32f4_spi_disable(spi); - return (transfer->len > spi->fifo_size); + return IRQ_HANDLED; } /** - * stm32_spi_irq - Interrupt handler for SPI controller events + * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller * @irq: interrupt line * @dev_id: SPI controller master interface */ -static irqreturn_t stm32_spi_irq(int irq, void *dev_id) +static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) { struct spi_master *master = dev_id; struct stm32_spi *spi = spi_master_get_devdata(master); @@ -489,19 +897,19 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) spin_lock_irqsave(&spi->lock, flags); - sr = readl_relaxed(spi->base + STM32_SPI_SR); - ier = readl_relaxed(spi->base + STM32_SPI_IER); + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + ier = 
readl_relaxed(spi->base + STM32H7_SPI_IER); mask = ier; /* EOTIE is triggered on EOT, SUSP and TXC events. */ - mask |= SPI_SR_SUSP; + mask |= STM32H7_SPI_SR_SUSP; /* * When TXTF is set, DXPIE and TXPIE are cleared. So in case of * Full-Duplex, need to poll RXP event to know if there are remaining * data, before disabling SPI. */ if (spi->rx_buf && !spi->cur_usedma) - mask |= SPI_SR_RXP; + mask |= STM32H7_SPI_SR_RXP; if (!(sr & mask)) { dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", @@ -510,10 +918,10 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) return IRQ_NONE; } - if (sr & SPI_SR_SUSP) { + if (sr & STM32H7_SPI_SR_SUSP) { dev_warn(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); /* * If communication is suspended while using DMA, it means * that something went wrong, so stop the current transfer @@ -522,15 +930,15 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) end = true; } - if (sr & SPI_SR_MODF) { + if (sr & STM32H7_SPI_SR_MODF) { dev_warn(spi->dev, "Mode fault: transfer aborted\n"); end = true; } - if (sr & SPI_SR_OVR) { + if (sr & STM32H7_SPI_SR_OVR) { dev_warn(spi->dev, "Overrun: received value discarded\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); /* * If overrun is detected while using DMA, it means that * something went wrong, so stop the current transfer @@ -539,27 +947,27 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) end = true; } - if (sr & SPI_SR_EOT) { + if (sr & STM32H7_SPI_SR_EOT) { if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, true); + stm32h7_spi_read_rxfifo(spi, true); end = true; } - if (sr & SPI_SR_TXP) + if (sr & STM32H7_SPI_SR_TXP) if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0))) - stm32_spi_write_txfifo(spi); + stm32h7_spi_write_txfifo(spi); - if (sr & SPI_SR_RXP) + if (sr & STM32H7_SPI_SR_RXP) if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); - writel_relaxed(mask, spi->base + STM32_SPI_IFCR); + writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); if (end) { spi_finalize_current_transfer(master); - stm32_spi_disable(spi); + stm32h7_spi_disable(spi); } return IRQ_HANDLED; @@ -598,7 +1006,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master, struct spi_device *spi_dev = msg->spi; struct device_node *np = spi_dev->dev.of_node; unsigned long flags; - u32 cfg2_clrb = 0, cfg2_setb = 0; + u32 clrb = 0, setb = 0; /* SPI slave device may need time between data frames */ spi->cur_midi = 0; @@ -606,19 +1014,19 @@ static int stm32_spi_prepare_msg(struct spi_master *master, dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi); if (spi_dev->mode & SPI_CPOL) - cfg2_setb |= SPI_CFG2_CPOL; + setb |= spi->cfg->regs->cpol.mask; else - cfg2_clrb |= SPI_CFG2_CPOL; + clrb |= spi->cfg->regs->cpol.mask; if (spi_dev->mode & SPI_CPHA) - cfg2_setb |= SPI_CFG2_CPHA; + setb |= spi->cfg->regs->cpha.mask; else - cfg2_clrb |= SPI_CFG2_CPHA; + clrb |= spi->cfg->regs->cpha.mask; if (spi_dev->mode & SPI_LSB_FIRST) - cfg2_setb |= SPI_CFG2_LSBFRST; + setb |= spi->cfg->regs->lsb_first.mask; else - cfg2_clrb |= SPI_CFG2_LSBFRST; + clrb |= spi->cfg->regs->lsb_first.mask; dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n", 
spi_dev->mode & SPI_CPOL, @@ -628,11 +1036,12 @@ static int stm32_spi_prepare_msg(struct spi_master *master, spin_lock_irqsave(&spi->lock, flags); - if (cfg2_clrb || cfg2_setb) + /* CPOL, CPHA and LSB FIRST bits have common register */ + if (clrb || setb) writel_relaxed( - (readl_relaxed(spi->base + STM32_SPI_CFG2) & - ~cfg2_clrb) | cfg2_setb, - spi->base + STM32_SPI_CFG2); + (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) & + ~clrb) | setb, + spi->base + spi->cfg->regs->cpol.reg); spin_unlock_irqrestore(&spi->lock, flags); @@ -640,12 +1049,40 @@ static int stm32_spi_prepare_msg(struct spi_master *master, } /** - * stm32_spi_dma_cb - dma callback + * stm32f4_spi_dma_tx_cb - dma callback + * + * DMA callback is called when the transfer is complete for DMA TX channel. + */ +static void stm32f4_spi_dma_tx_cb(void *data) +{ + struct stm32_spi *spi = data; + + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { + spi_finalize_current_transfer(spi->master); + stm32f4_spi_disable(spi); + } +} + +/** + * stm32f4_spi_dma_rx_cb - dma callback + * + * DMA callback is called when the transfer is complete for DMA RX channel. + */ +static void stm32f4_spi_dma_rx_cb(void *data) +{ + struct stm32_spi *spi = data; + + spi_finalize_current_transfer(spi->master); + stm32f4_spi_disable(spi); +} + +/** + * stm32h7_spi_dma_cb - dma callback * * DMA callback is called when the transfer is complete or when an error * occurs. If the transfer is complete, EOT flag is raised. */ -static void stm32_spi_dma_cb(void *data) +static void stm32h7_spi_dma_cb(void *data) { struct stm32_spi *spi = data; unsigned long flags; @@ -653,11 +1090,11 @@ static void stm32_spi_dma_cb(void *data) spin_lock_irqsave(&spi->lock, flags); - sr = readl_relaxed(spi->base + STM32_SPI_SR); + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); spin_unlock_irqrestore(&spi->lock, flags); - if (!(sr & SPI_SR_EOT)) + if (!(sr & STM32H7_SPI_SR_EOT)) dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr); /* Now wait for EOT, or SUSP or OVR in case of error */ @@ -681,23 +1118,27 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, else buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; - /* Valid for DMA Half or Full Fifo threshold */ - if (spi->cur_fthlv == 2) + if (spi->cfg->has_fifo) { + /* Valid for DMA Half or Full Fifo threshold */ + if (spi->cur_fthlv == 2) + maxburst = 1; + else + maxburst = spi->cur_fthlv; + } else { maxburst = 1; - else - maxburst = spi->cur_fthlv; + } memset(dma_conf, 0, sizeof(struct dma_slave_config)); dma_conf->direction = dir; if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */ - dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR; + dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg; dma_conf->src_addr_width = buswidth; dma_conf->src_maxburst = maxburst; dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n", buswidth, maxburst); } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */ - dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR; + dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg; dma_conf->dst_addr_width = buswidth; dma_conf->dst_maxburst = maxburst; @@ -707,27 +1148,68 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, } /** - * stm32_spi_transfer_one_irq - transfer a single spi_transfer using - * interrupts + * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using + * interrupts * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. 
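/*
 * Editor's note, not part of the patch: the 0-or-1 return described above
 * follows the SPI core's ->transfer_one() contract. A positive return tells
 * the core the transfer completes asynchronously; the core then waits until
 * the driver calls spi_finalize_current_transfer(), which this patch does
 * from the threaded IRQ handlers and the DMA callbacks. A minimal shape of
 * that pattern (names here are illustrative only):
 */
static int example_transfer_one(struct spi_master *master,
				struct spi_device *dev, struct spi_transfer *t)
{
	/* ... program the transfer and enable interrupts or DMA ... */
	return 1;	/* still in progress, finalized later from IRQ context */
}

static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	/* ... drain FIFOs and disable the controller ... */
	spi_finalize_current_transfer(master);	/* wakes the waiting SPI core */
	return IRQ_HANDLED;
}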
*/ -static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) +static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) +{ + unsigned long flags; + u32 cr2 = 0; + + /* Enable the interrupts relative to the current communication mode */ + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { + cr2 |= STM32F4_SPI_CR2_TXEIE; + } else if (spi->cur_comm == SPI_FULL_DUPLEX) { + /* In transmit-only mode, the OVR flag is set in the SR register + * since the received data are never read. Therefore set OVR + * interrupt only when rx buffer is available. + */ + cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE; + } else { + return -EINVAL; + } + + spin_lock_irqsave(&spi->lock, flags); + + stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2); + + stm32_spi_enable(spi); + + /* starting data transfer when buffer is loaded */ + if (spi->tx_buf) + stm32f4_spi_write_tx(spi); + + spin_unlock_irqrestore(&spi->lock, flags); + + return 1; +} + +/** + * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using + * interrupts + * + * It must returns 0 if the transfer is finished or 1 if the transfer is still + * in progress. + */ +static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) { unsigned long flags; u32 ier = 0; /* Enable the interrupts relative to the current communication mode */ if (spi->tx_buf && spi->rx_buf) /* Full Duplex */ - ier |= SPI_IER_DXPIE; + ier |= STM32H7_SPI_IER_DXPIE; else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */ - ier |= SPI_IER_TXPIE; + ier |= STM32H7_SPI_IER_TXPIE; else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */ - ier |= SPI_IER_RXPIE; + ier |= STM32H7_SPI_IER_RXPIE; /* Enable the interrupts relative to the end of transfer */ - ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; + ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE | + STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE; spin_lock_irqsave(&spi->lock, flags); @@ -735,11 +1217,11 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) /* Be sure to have data in fifo before starting data transfer */ if (spi->tx_buf) - stm32_spi_write_txfifo(spi); + stm32h7_spi_write_txfifo(spi); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); - writel_relaxed(ier, spi->base + STM32_SPI_IER); + writel_relaxed(ier, spi->base + STM32H7_SPI_IER); spin_unlock_irqrestore(&spi->lock, flags); @@ -747,6 +1229,43 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) } /** + * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + */ +static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* In DMA mode end of transfer is handled by DMA TX or RX callback. */ + if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX || + spi->cur_comm == SPI_FULL_DUPLEX) { + /* + * In transmit-only mode, the OVR flag is set in the SR register + * since the received data are never read. Therefore set OVR + * interrupt only when rx buffer is available. 
+ */ + stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE); + } + + stm32_spi_enable(spi); +} + +/** + * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + */ +static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* Enable the interrupts relative to the end of transfer */ + stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE | + STM32H7_SPI_IER_TXTFIE | + STM32H7_SPI_IER_OVRIE | + STM32H7_SPI_IER_MODFIE); + + stm32_spi_enable(spi); + + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); +} + +/** * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA * * It must returns 0 if the transfer is finished or 1 if the transfer is still @@ -758,17 +1277,17 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, struct dma_slave_config tx_dma_conf, rx_dma_conf; struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc; unsigned long flags; - u32 ier = 0; spin_lock_irqsave(&spi->lock, flags); rx_dma_desc = NULL; - if (spi->rx_buf) { + if (spi->rx_buf && spi->dma_rx) { stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM); dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); /* Enable Rx DMA request */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); + stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); rx_dma_desc = dmaengine_prep_slave_sg( spi->dma_rx, xfer->rx_sg.sgl, @@ -778,7 +1297,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, } tx_dma_desc = NULL; - if (spi->tx_buf) { + if (spi->tx_buf && spi->dma_tx) { stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV); dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); @@ -789,12 +1308,15 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, DMA_PREP_INTERRUPT); } - if ((spi->tx_buf && !tx_dma_desc) || - (spi->rx_buf && !rx_dma_desc)) + if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) || + (spi->rx_buf && spi->dma_rx && !rx_dma_desc)) + goto dma_desc_error; + + if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc)) goto dma_desc_error; if (rx_dma_desc) { - rx_dma_desc->callback = stm32_spi_dma_cb; + rx_dma_desc->callback = spi->cfg->dma_rx_cb; rx_dma_desc->callback_param = spi; if (dma_submit_error(dmaengine_submit(rx_dma_desc))) { @@ -806,8 +1328,9 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, } if (tx_dma_desc) { - if (spi->cur_comm == SPI_SIMPLEX_TX) { - tx_dma_desc->callback = stm32_spi_dma_cb; + if (spi->cur_comm == SPI_SIMPLEX_TX || + spi->cur_comm == SPI_3WIRE_TX) { + tx_dma_desc->callback = spi->cfg->dma_tx_cb; tx_dma_desc->callback_param = spi; } @@ -819,130 +1342,278 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, dma_async_issue_pending(spi->dma_tx); /* Enable Tx DMA request */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN); + stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg, + spi->cfg->regs->dma_tx_en.mask); } - /* Enable the interrupts relative to the end of transfer */ - ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; - writel_relaxed(ier, spi->base + STM32_SPI_IER); - - stm32_spi_enable(spi); - - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); + spi->cfg->transfer_one_dma_start(spi); spin_unlock_irqrestore(&spi->lock, flags); return 1; dma_submit_error: - if (spi->rx_buf) + if (spi->dma_rx) dmaengine_terminate_all(spi->dma_rx); dma_desc_error: - stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); + stm32_spi_clr_bits(spi, 
spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); spin_unlock_irqrestore(&spi->lock, flags); dev_info(spi->dev, "DMA issue: fall back to irq transfer\n"); - return stm32_spi_transfer_one_irq(spi); + spi->cur_usedma = false; + return spi->cfg->transfer_one_irq(spi); } /** - * stm32_spi_transfer_one_setup - common setup to transfer a single - * spi_transfer either using DMA or - * interrupts. + * stm32f4_spi_set_bpw - Configure bits per word + * @spi: pointer to the spi controller data structure */ -static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, - struct spi_device *spi_dev, - struct spi_transfer *transfer) +static void stm32f4_spi_set_bpw(struct stm32_spi *spi) { - unsigned long flags; - u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0; - u32 mode, nb_words; - int ret = 0; - - spin_lock_irqsave(&spi->lock, flags); + if (spi->cur_bpw == 16) + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); + else + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); +} - if (spi->cur_bpw != transfer->bits_per_word) { - u32 bpw, fthlv; +/** + * stm32h7_spi_set_bpw - configure bits per word + * @spi: pointer to the spi controller data structure + */ +static void stm32h7_spi_set_bpw(struct stm32_spi *spi) +{ + u32 bpw, fthlv; + u32 cfg1_clrb = 0, cfg1_setb = 0; - spi->cur_bpw = transfer->bits_per_word; - bpw = spi->cur_bpw - 1; + bpw = spi->cur_bpw - 1; - cfg1_clrb |= SPI_CFG1_DSIZE; - cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE; + cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE; + cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) & + STM32H7_SPI_CFG1_DSIZE; - spi->cur_fthlv = stm32_spi_prepare_fthlv(spi); - fthlv = spi->cur_fthlv - 1; + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi); + fthlv = spi->cur_fthlv - 1; - cfg1_clrb |= SPI_CFG1_FTHLV; - cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV; - } + cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV; + cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) & + STM32H7_SPI_CFG1_FTHLV; - if (spi->cur_speed != transfer->speed_hz) { - int mbr; + writel_relaxed( + (readl_relaxed(spi->base + STM32H7_SPI_CFG1) & + ~cfg1_clrb) | cfg1_setb, + spi->base + STM32H7_SPI_CFG1); +} - /* Update spi->cur_speed with real clock speed */ - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz); - if (mbr < 0) { - ret = mbr; - goto out; - } +/** + * stm32_spi_set_mbr - Configure baud rate divisor in master mode + * @spi: pointer to the spi controller data structure + * @mbrdiv: baud rate divisor value + */ +static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv) +{ + u32 clrb = 0, setb = 0; - transfer->speed_hz = spi->cur_speed; + clrb |= spi->cfg->regs->br.mask; + setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) & + spi->cfg->regs->br.mask; - cfg1_clrb |= SPI_CFG1_MBR; - cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR; - } + writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) & + ~clrb) | setb, + spi->base + spi->cfg->regs->br.reg); +} - if (cfg1_clrb || cfg1_setb) - writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) & - ~cfg1_clrb) | cfg1_setb, - spi->base + STM32_SPI_CFG1); +/** + * stm32_spi_communication_type - return transfer communication type + * @spi_dev: pointer to the spi device + * transfer: pointer to spi transfer + */ +static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev, + struct spi_transfer *transfer) +{ + unsigned int type = SPI_FULL_DUPLEX; - mode = SPI_FULL_DUPLEX; if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals 
shared */ /* * SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL - * is forbidden und unvalidated by SPI subsystem so depending + * is forbidden and unvalidated by SPI subsystem so depending * on the valid buffer, we can determine the direction of the * transfer. */ - mode = SPI_HALF_DUPLEX; if (!transfer->tx_buf) - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); - else if (!transfer->rx_buf) - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); + type = SPI_3WIRE_RX; + else + type = SPI_3WIRE_TX; } else { if (!transfer->tx_buf) - mode = SPI_SIMPLEX_RX; + type = SPI_SIMPLEX_RX; else if (!transfer->rx_buf) - mode = SPI_SIMPLEX_TX; + type = SPI_SIMPLEX_TX; + } + + return type; +} + +/** + * stm32f4_spi_set_mode - configure communication mode + * @spi: pointer to the spi controller data structure + * @comm_type: type of communication to configure + */ +static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +{ + if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) { + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE | + STM32F4_SPI_CR1_BIDIOE); + } else if (comm_type == SPI_FULL_DUPLEX) { + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE | + STM32F4_SPI_CR1_BIDIOE); + } else { + return -EINVAL; } - if (spi->cur_comm != mode) { - spi->cur_comm = mode; - cfg2_clrb |= SPI_CFG2_COMM; - cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM; + return 0; +} + +/** + * stm32h7_spi_set_mode - configure communication mode + * @spi: pointer to the spi controller data structure + * @comm_type: type of communication to configure + */ +static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +{ + u32 mode; + u32 cfg2_clrb = 0, cfg2_setb = 0; + + if (comm_type == SPI_3WIRE_RX) { + mode = STM32H7_SPI_HALF_DUPLEX; + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR); + } else if (comm_type == SPI_3WIRE_TX) { + mode = STM32H7_SPI_HALF_DUPLEX; + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR); + } else if (comm_type == SPI_SIMPLEX_RX) { + mode = STM32H7_SPI_SIMPLEX_RX; + } else if (comm_type == SPI_SIMPLEX_TX) { + mode = STM32H7_SPI_SIMPLEX_TX; + } else { + mode = STM32H7_SPI_FULL_DUPLEX; } - cfg2_clrb |= SPI_CFG2_MIDI; - if ((transfer->len > 1) && (spi->cur_midi > 0)) { + cfg2_clrb |= STM32H7_SPI_CFG2_COMM; + cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) & + STM32H7_SPI_CFG2_COMM; + + writel_relaxed( + (readl_relaxed(spi->base + STM32H7_SPI_CFG2) & + ~cfg2_clrb) | cfg2_setb, + spi->base + STM32H7_SPI_CFG2); + + return 0; +} + +/** + * stm32h7_spi_data_idleness - configure minimum time delay inserted between two + * consecutive data frames in master mode + * @spi: pointer to the spi controller data structure + * @len: transfer len + */ +static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) +{ + u32 cfg2_clrb = 0, cfg2_setb = 0; + + cfg2_clrb |= STM32H7_SPI_CFG2_MIDI; + if ((len > 1) && (spi->cur_midi > 0)) { u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed); u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns), - (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT); + (u32)STM32H7_SPI_CFG2_MIDI >> + STM32H7_SPI_CFG2_MIDI_SHIFT); dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n", sck_period_ns, midi, midi * sck_period_ns); + cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) & + STM32H7_SPI_CFG2_MIDI; + } + + writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) & + ~cfg2_clrb) | cfg2_setb, + spi->base + STM32H7_SPI_CFG2); +} + 
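[Editor's aside, not part of the patch: the data-idleness helper above converts the requested inter-data delay from nanoseconds into SCK periods and clamps it to the width of the CFG2 MIDI field. A minimal stand-alone sketch of the same arithmetic follows; the constants and the 4-bit field width are assumptions for illustration, not values taken from the patch.]

/*
 * Illustrative sketch only -- mirrors the MIDI computation in
 * stm32h7_spi_data_idleness().  SPI_1HZ_NS and the 4-bit field
 * maximum are assumed here for the example.
 */
#define EXAMPLE_SPI_1HZ_NS	1000000000U
#define EXAMPLE_MIDI_FIELD_MAX	15U	/* assumed 4-bit MIDI field */

static unsigned int example_midi(unsigned int cur_midi_ns, unsigned int speed_hz)
{
	/* DIV_ROUND_UP(SPI_1HZ_NS, speed_hz): one SCK period in ns */
	unsigned int sck_period_ns = (EXAMPLE_SPI_1HZ_NS + speed_hz - 1) / speed_hz;
	/* delay expressed as a whole number of SCK periods */
	unsigned int midi = (cur_midi_ns + sck_period_ns - 1) / sck_period_ns;

	/* clamp to what the register field can encode */
	return midi < EXAMPLE_MIDI_FIELD_MAX ? midi : EXAMPLE_MIDI_FIELD_MAX;
}
/* e.g. speed_hz = 10000000 -> 100 ns period; cur_midi_ns = 550 -> 6 periods */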
+/** + * stm32h7_spi_number_of_data - configure number of data at current transfer + * @spi: pointer to the spi controller data structure + * @len: transfer length + */ +static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) +{ + u32 cr2_clrb = 0, cr2_setb = 0; + + if (nb_words <= (STM32H7_SPI_CR2_TSIZE >> + STM32H7_SPI_CR2_TSIZE_SHIFT)) { + cr2_clrb |= STM32H7_SPI_CR2_TSIZE; + cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT; + writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) & + ~cr2_clrb) | cr2_setb, + spi->base + STM32H7_SPI_CR2); + } else { + return -EMSGSIZE; + } + + return 0; +} + +/** + * stm32_spi_transfer_one_setup - common setup to transfer a single + * spi_transfer either using DMA or + * interrupts. + */ +static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, + struct spi_device *spi_dev, + struct spi_transfer *transfer) +{ + unsigned long flags; + unsigned int comm_type; + int nb_words, ret = 0; + + spin_lock_irqsave(&spi->lock, flags); + + if (spi->cur_bpw != transfer->bits_per_word) { + spi->cur_bpw = transfer->bits_per_word; + spi->cfg->set_bpw(spi); + } - cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI; + if (spi->cur_speed != transfer->speed_hz) { + int mbr; + + /* Update spi->cur_speed with real clock speed */ + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, + spi->cfg->baud_rate_div_min, + spi->cfg->baud_rate_div_max); + if (mbr < 0) { + ret = mbr; + goto out; + } + + transfer->speed_hz = spi->cur_speed; + stm32_spi_set_mbr(spi, mbr); } - if (cfg2_clrb || cfg2_setb) - writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) & - ~cfg2_clrb) | cfg2_setb, - spi->base + STM32_SPI_CFG2); + comm_type = stm32_spi_communication_type(spi_dev, transfer); + if (spi->cur_comm != comm_type) { + ret = spi->cfg->set_mode(spi, comm_type); + + if (ret < 0) + goto out; + + spi->cur_comm = comm_type; + } + + if (spi->cfg->set_data_idleness) + spi->cfg->set_data_idleness(spi, transfer->len); if (spi->cur_bpw <= 8) nb_words = transfer->len; @@ -950,13 +1621,11 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, nb_words = DIV_ROUND_UP(transfer->len * 8, 16); else nb_words = DIV_ROUND_UP(transfer->len * 8, 32); - nb_words <<= SPI_CR2_TSIZE_SHIFT; - if (nb_words <= SPI_CR2_TSIZE) { - writel_relaxed(nb_words, spi->base + STM32_SPI_CR2); - } else { - ret = -EMSGSIZE; - goto out; + if (spi->cfg->set_number_of_data) { + ret = spi->cfg->set_number_of_data(spi, nb_words); + if (ret < 0) + goto out; } spi->cur_xferlen = transfer->len; @@ -997,7 +1666,7 @@ static int stm32_spi_transfer_one(struct spi_master *master, spi->rx_len = spi->rx_buf ? transfer->len : 0; spi->cur_usedma = (master->can_dma && - stm32_spi_can_dma(master, spi_dev, transfer)); + master->can_dma(master, spi_dev, transfer)); ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer); if (ret) { @@ -1008,47 +1677,73 @@ static int stm32_spi_transfer_one(struct spi_master *master, if (spi->cur_usedma) return stm32_spi_transfer_one_dma(spi, transfer); else - return stm32_spi_transfer_one_irq(spi); + return spi->cfg->transfer_one_irq(spi); } /** * stm32_spi_unprepare_msg - relax the hardware - * - * Normally, if TSIZE has been configured, we should relax the hardware at the - * reception of the EOT interrupt. But in case of error, EOT will not be - * raised. So the subsystem unprepare_message call allows us to properly - * complete the transfer from an hardware point of view. 
*/ static int stm32_spi_unprepare_msg(struct spi_master *master, struct spi_message *msg) { struct stm32_spi *spi = spi_master_get_devdata(master); - stm32_spi_disable(spi); + spi->cfg->disable(spi); + + return 0; +} + +/** + * stm32f4_spi_config - Configure SPI controller as SPI master + */ +static int stm32f4_spi_config(struct stm32_spi *spi) +{ + unsigned long flags; + + spin_lock_irqsave(&spi->lock, flags); + + /* Ensure I2SMOD bit is kept cleared */ + stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR, + STM32F4_SPI_I2SCFGR_I2SMOD); + + /* + * - SS input value high + * - transmitter half duplex direction + * - Set the master mode (default Motorola mode) + * - Consider 1 master/n slaves configuration and + * SS input value is determined by the SSI bit + */ + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI | + STM32F4_SPI_CR1_BIDIOE | + STM32F4_SPI_CR1_MSTR | + STM32F4_SPI_CR1_SSM); + + spin_unlock_irqrestore(&spi->lock, flags); return 0; } /** - * stm32_spi_config - Configure SPI controller as SPI master + * stm32h7_spi_config - Configure SPI controller as SPI master */ -static int stm32_spi_config(struct stm32_spi *spi) +static int stm32h7_spi_config(struct stm32_spi *spi) { unsigned long flags; spin_lock_irqsave(&spi->lock, flags); /* Ensure I2SMOD bit is kept cleared */ - stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD); + stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR, + STM32H7_SPI_I2SCFGR_I2SMOD); /* * - SS input value high * - transmitter half duplex direction * - automatic communication suspend when RX-Fifo is full */ - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI | - SPI_CR1_HDDIR | - SPI_CR1_MASRX); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI | + STM32H7_SPI_CR1_HDDIR | + STM32H7_SPI_CR1_MASRX); /* * - Set the master mode (default Motorola mode) @@ -1056,17 +1751,56 @@ static int stm32_spi_config(struct stm32_spi *spi) * SS input value is determined by the SSI bit * - keep control of all associated GPIOs */ - stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER | - SPI_CFG2_SSM | - SPI_CFG2_AFCNTR); + stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER | + STM32H7_SPI_CFG2_SSM | + STM32H7_SPI_CFG2_AFCNTR); spin_unlock_irqrestore(&spi->lock, flags); return 0; } +static const struct stm32_spi_cfg stm32f4_spi_cfg = { + .regs = &stm32f4_spi_regspec, + .get_bpw_mask = stm32f4_spi_get_bpw_mask, + .disable = stm32f4_spi_disable, + .config = stm32f4_spi_config, + .set_bpw = stm32f4_spi_set_bpw, + .set_mode = stm32f4_spi_set_mode, + .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start, + .dma_tx_cb = stm32f4_spi_dma_tx_cb, + .dma_rx_cb = stm32f4_spi_dma_rx_cb, + .transfer_one_irq = stm32f4_spi_transfer_one_irq, + .irq_handler_event = stm32f4_spi_irq_event, + .irq_handler_thread = stm32f4_spi_irq_thread, + .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN, + .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX, + .has_fifo = false, +}; + +static const struct stm32_spi_cfg stm32h7_spi_cfg = { + .regs = &stm32h7_spi_regspec, + .get_fifo_size = stm32h7_spi_get_fifo_size, + .get_bpw_mask = stm32h7_spi_get_bpw_mask, + .disable = stm32h7_spi_disable, + .config = stm32h7_spi_config, + .set_bpw = stm32h7_spi_set_bpw, + .set_mode = stm32h7_spi_set_mode, + .set_data_idleness = stm32h7_spi_data_idleness, + .set_number_of_data = stm32h7_spi_number_of_data, + .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start, + .dma_rx_cb = stm32h7_spi_dma_cb, + .dma_tx_cb = stm32h7_spi_dma_cb, + .transfer_one_irq = stm32h7_spi_transfer_one_irq, + 
.irq_handler_thread = stm32h7_spi_irq_thread, + .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, + .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, + .has_fifo = true, +}; + static const struct of_device_id stm32_spi_of_match[] = { - { .compatible = "st,stm32h7-spi", }, + { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg }, + { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg }, {}, }; MODULE_DEVICE_TABLE(of, stm32_spi_of_match); @@ -1090,12 +1824,17 @@ static int stm32_spi_probe(struct platform_device *pdev) spi->master = master; spin_lock_init(&spi->lock); + spi->cfg = (const struct stm32_spi_cfg *) + of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev)->data; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(spi->base)) { ret = PTR_ERR(spi->base); goto err_master_put; } + spi->phys_addr = (dma_addr_t)res->start; spi->irq = platform_get_irq(pdev, 0); @@ -1104,16 +1843,17 @@ static int stm32_spi_probe(struct platform_device *pdev) ret = -ENOENT; goto err_master_put; } - ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL, - stm32_spi_irq, IRQF_ONESHOT, - pdev->name, master); + ret = devm_request_threaded_irq(&pdev->dev, spi->irq, + spi->cfg->irq_handler_event, + spi->cfg->irq_handler_thread, + IRQF_ONESHOT, pdev->name, master); if (ret) { dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq, ret); goto err_master_put; } - spi->clk = devm_clk_get(&pdev->dev, 0); + spi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(spi->clk)) { ret = PTR_ERR(spi->clk); dev_err(&pdev->dev, "clk get failed: %d\n", ret); @@ -1139,9 +1879,10 @@ static int stm32_spi_probe(struct platform_device *pdev) reset_control_deassert(spi->rst); } - spi->fifo_size = stm32_spi_get_fifo_size(spi); + if (spi->cfg->has_fifo) + spi->fifo_size = spi->cfg->get_fifo_size(spi); - ret = stm32_spi_config(spi); + ret = spi->cfg->config(spi); if (ret) { dev_err(&pdev->dev, "controller configuration failed: %d\n", ret); @@ -1151,11 +1892,11 @@ static int stm32_spi_probe(struct platform_device *pdev) master->dev.of_node = pdev->dev.of_node; master->auto_runtime_pm = true; master->bus_num = pdev->id; - master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST | - SPI_3WIRE | SPI_LOOP; - master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi); - master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN; - master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX; + master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | + SPI_3WIRE; + master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); + master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; + master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; master->setup = stm32_spi_setup; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; @@ -1233,7 +1974,7 @@ static int stm32_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct stm32_spi *spi = spi_master_get_devdata(master); - stm32_spi_disable(spi); + spi->cfg->disable(spi); if (master->dma_tx) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 5f19016bbf10..b9fb6493cd6b 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) { 
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, - MEM_CS_EN(spi->chip_select), - MEM_CS_MASK); + MEM_CS_MASK, + MEM_CS_EN(spi->chip_select)); } qspi->mmap_enabled = true; } @@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, - 0, MEM_CS_MASK); + MEM_CS_MASK, 0); qspi->mmap_enabled = false; } diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 97d137591b18..e7e8ea1edcce 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1008,6 +1008,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* RX */ dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC); + if (!dma->sg_rx_p) + return; + sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */ /* offset, length setting */ sg = dma->sg_rx_p; @@ -1068,6 +1071,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) } dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC); + if (!dma->sg_tx_p) + return; + sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ /* offset, length setting */ sg = dma->sg_tx_p; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 9a7def7c3237..93986f879b09 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -19,6 +19,7 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> #include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/property.h> @@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi) goto done; } - if (ctlr->cs_gpios) + /* Descriptors take precedence */ + if (ctlr->cs_gpiods) + spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; + else if (ctlr->cs_gpios) spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; /* Drivers may modify this initial i/o setup, but will @@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable) if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (gpio_is_valid(spi->cs_gpio)) { - /* Honour the SPI_NO_CS flag */ - if (!(spi->mode & SPI_NO_CS)) - gpio_set_value(spi->cs_gpio, !enable); + if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { + /* + * Honour the SPI_NO_CS flag and invert the enable line, as + * active low is default for SPI. Execution paths that handle + * polarity inversion in gpiolib (such as device tree) will + * enforce active high using the SPI_CS_HIGH resulting in a + * double inversion through the code above. + */ + if (!(spi->mode & SPI_NO_CS)) { + if (spi->cs_gpiod) + gpiod_set_value_cansleep(spi->cs_gpiod, + !enable); + else + gpio_set_value_cansleep(spi->cs_gpio, !enable); + } /* Some SPI masters need both GPIO CS & slave_select */ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && spi->controller->set_cs) @@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, spi->mode |= SPI_CPHA; if (of_property_read_bool(nc, "spi-cpol")) spi->mode |= SPI_CPOL; - if (of_property_read_bool(nc, "spi-cs-high")) - spi->mode |= SPI_CS_HIGH; if (of_property_read_bool(nc, "spi-3wire")) spi->mode |= SPI_3WIRE; if (of_property_read_bool(nc, "spi-lsb-first")) spi->mode |= SPI_LSB_FIRST; + /* + * For descriptors associated with the device, polarity inversion is + * handled in the gpiolib, so all chip selects are "active high" in + * the logical sense, the gpiolib will invert the line if need be. 
+ */ + if (ctlr->use_gpio_descriptors) + spi->mode |= SPI_CS_HIGH; + else if (of_property_read_bool(nc, "spi-cs-high")) + spi->mode |= SPI_CS_HIGH; + /* Device DUAL/QUAD mode */ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { switch (value) { @@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr) } #endif +/** + * spi_get_gpio_descs() - grab chip select GPIOs for the master + * @ctlr: The SPI master to grab GPIO descriptors for + */ +static int spi_get_gpio_descs(struct spi_controller *ctlr) +{ + int nb, i; + struct gpio_desc **cs; + struct device *dev = &ctlr->dev; + + nb = gpiod_count(dev, "cs"); + ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); + + /* No GPIOs at all is fine, else return the error */ + if (nb == 0 || nb == -ENOENT) + return 0; + else if (nb < 0) + return nb; + + cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), + GFP_KERNEL); + if (!cs) + return -ENOMEM; + ctlr->cs_gpiods = cs; + + for (i = 0; i < nb; i++) { + /* + * Most chipselects are active low, the inverted + * semantics are handled by special quirks in gpiolib, + * so initializing them GPIOD_OUT_LOW here means + * "unasserted", in most cases this will drive the physical + * line high. + */ + cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, + GPIOD_OUT_LOW); + + if (cs[i]) { + /* + * If we find a CS GPIO, name it after the device and + * chip select line. + */ + char *gpioname; + + gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", + dev_name(dev), i); + if (!gpioname) + return -ENOMEM; + gpiod_set_consumer_name(cs[i], gpioname); + } + } + + return 0; +} + static int spi_controller_check_ops(struct spi_controller *ctlr) { /* @@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr) return status; if (!spi_controller_is_slave(ctlr)) { - status = of_spi_register_master(ctlr); - if (status) - return status; + if (ctlr->use_gpio_descriptors) { + status = spi_get_gpio_descs(ctlr); + if (status) + return status; + /* + * A controller using GPIO descriptors always + * supports SPI_CS_HIGH if need be. + */ + ctlr->mode_bits |= SPI_CS_HIGH; + } else { + /* Legacy code path for GPIOs from DT */ + status = of_spi_register_master(ctlr); + if (status) + return status; + } } /* even if it's just one always-selected device, there must @@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * cs_change is set for each transfer. */ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || + spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) { size_t maxsize; int ret; @@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * it is not set for this transfer. * Set transfer tx_nbits and rx_nbits as single transfer default * (SPI_NBITS_SINGLE) if it is not set for this transfer. + * Ensure transfer word_delay is at least as long as that required by + * device itself. 
*/ message->frame_length = 0; list_for_each_entry(xfer, &message->transfers, transfer_list) { @@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) !(spi->mode & SPI_RX_QUAD)) return -EINVAL; } + + if (xfer->word_delay_usecs < spi->word_delay_usecs) + xfer->word_delay_usecs = spi->word_delay_usecs; } message->status = -EINPROGRESS; diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c index e14d7cc411c9..73b87da15eb2 100644 --- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c +++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c @@ -137,7 +137,7 @@ static int isFileReadable(char *path) ret = PTR_ERR(fp); } else { - oldfs = get_fs(); set_fs(get_ds()); + oldfs = get_fs(); set_fs(KERNEL_DS); if (1!=readFile(fp, &buf, 1)) ret = -EINVAL; @@ -165,7 +165,7 @@ static int retriveFromFile(char *path, u8 *buf, u32 sz) if (0 == (ret =openFile(&fp, path, O_RDONLY, 0))) { DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp); - oldfs = get_fs(); set_fs(get_ds()); + oldfs = get_fs(); set_fs(KERNEL_DS); ret =readFile(fp, buf, sz); set_fs(oldfs); closeFile(fp); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index e5efce3c08e2..947f9b28de9e 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -699,8 +699,10 @@ static int __init optee_driver_init(void) return -ENODEV; np = of_find_matching_node(fw_np, optee_match); - if (!np || !of_device_is_available(np)) + if (!np || !of_device_is_available(np)) { + of_node_put(np); return -ENODEV; + } optee = optee_probe(np); of_node_put(np); diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 0212f0ee8aea..b96fedc77ee5 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -60,9 +60,9 @@ struct mdev_device *mdev_from_dev(struct device *dev) } EXPORT_SYMBOL(mdev_from_dev); -uuid_le mdev_uuid(struct mdev_device *mdev) +const guid_t *mdev_uuid(struct mdev_device *mdev) { - return mdev->uuid; + return &mdev->uuid; } EXPORT_SYMBOL(mdev_uuid); @@ -88,8 +88,7 @@ static void mdev_release_parent(struct kref *kref) put_device(dev); } -static -inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent) +static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent) { if (parent) kref_get(&parent->ref); @@ -276,7 +275,8 @@ static void mdev_device_release(struct device *dev) kfree(mdev); } -int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) +int mdev_device_create(struct kobject *kobj, + struct device *dev, const guid_t *uuid) { int ret; struct mdev_device *mdev, *tmp; @@ -291,7 +291,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) /* Check for duplicate */ list_for_each_entry(tmp, &mdev_list, next) { - if (!uuid_le_cmp(tmp->uuid, uuid)) { + if (guid_equal(&tmp->uuid, uuid)) { mutex_unlock(&mdev_list_lock); ret = -EEXIST; goto mdev_fail; @@ -305,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) goto mdev_fail; } - memcpy(&mdev->uuid, &uuid, sizeof(uuid_le)); + guid_copy(&mdev->uuid, uuid); list_add(&mdev->next, &mdev_list); mutex_unlock(&mdev_list_lock); @@ -315,7 +315,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) mdev->dev.parent = dev; mdev->dev.bus = &mdev_bus_type; mdev->dev.release = mdev_device_release; - dev_set_name(&mdev->dev, "%pUl", uuid.b); + dev_set_name(&mdev->dev, "%pUl", uuid); ret = 
device_register(&mdev->dev); if (ret) { diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index b5819b7d7ef7..379758c52b1b 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -28,7 +28,7 @@ struct mdev_parent { struct mdev_device { struct device dev; struct mdev_parent *parent; - uuid_le uuid; + guid_t uuid; void *driver_data; struct kref ref; struct list_head next; @@ -58,7 +58,8 @@ void parent_remove_sysfs_files(struct mdev_parent *parent); int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type); void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type); -int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid); +int mdev_device_create(struct kobject *kobj, + struct device *dev, const guid_t *uuid); int mdev_device_remove(struct device *dev, bool force_remove); #endif /* MDEV_PRIVATE_H */ diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index ce5dd219f2c8..5193a0e0ce5a 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -55,7 +55,7 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev, const char *buf, size_t count) { char *str; - uuid_le uuid; + guid_t uuid; int ret; if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1)) @@ -65,12 +65,12 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev, if (!str) return -ENOMEM; - ret = uuid_le_to_bin(str, &uuid); + ret = guid_parse(str, &uuid); kfree(str); if (ret) return ret; - ret = mdev_device_create(kobj, dev, uuid); + ret = mdev_device_create(kobj, dev, &uuid); if (ret) return ret; diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index ff60bd1ea587..a25659b5a5d1 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -209,6 +209,57 @@ static bool vfio_pci_nointx(struct pci_dev *pdev) return false; } +static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 pmcsr; + + if (!pdev->pm_cap) + return; + + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr); + + vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET); +} + +/* + * pci_set_power_state() wrapper handling devices which perform a soft reset on + * D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev, + * restore when returned to D0. Saved separately from pci_saved_state for use + * by PM capability emulation and separately from pci_dev internal saved state + * to avoid it being overwritten and consumed around other resets. 
+ */ +int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state) +{ + struct pci_dev *pdev = vdev->pdev; + bool needs_restore = false, needs_save = false; + int ret; + + if (vdev->needs_pm_restore) { + if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) { + pci_save_state(pdev); + needs_save = true; + } + + if (pdev->current_state >= PCI_D3hot && state <= PCI_D0) + needs_restore = true; + } + + ret = pci_set_power_state(pdev, state); + + if (!ret) { + /* D3 might be unsupported via quirk, skip unless in D3 */ + if (needs_save && pdev->current_state >= PCI_D3hot) { + vdev->pm_save = pci_store_saved_state(pdev); + } else if (needs_restore) { + pci_load_and_free_saved_state(pdev, &vdev->pm_save); + pci_restore_state(pdev); + } + } + + return ret; +} + static int vfio_pci_enable(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; @@ -216,7 +267,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) u16 cmd; u8 msix_pos; - pci_set_power_state(pdev, PCI_D0); + vfio_pci_set_power_state(vdev, PCI_D0); /* Don't allow our initial saved state to include busmaster */ pci_clear_master(pdev); @@ -407,7 +458,7 @@ out: vfio_pci_try_bus_reset(vdev); if (!disable_idle_d3) - pci_set_power_state(pdev, PCI_D3hot); + vfio_pci_set_power_state(vdev, PCI_D3hot); } static void vfio_pci_release(void *device_data) @@ -708,6 +759,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; + u16 orig_cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -723,15 +775,23 @@ static long vfio_pci_ioctl(void *device_data, break; } - /* Is it really there? */ + /* + * Is it really there? Enable memory decode for + * implicit access in pci_map_rom(). + */ + pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); + pci_write_config_word(pdev, PCI_COMMAND, + orig_cmd | PCI_COMMAND_MEMORY); + io = pci_map_rom(pdev, &size); - if (!io || !size) { + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; + pci_unmap_rom(pdev, io); + } else { info.size = 0; - break; } - pci_unmap_rom(pdev, io); - info.flags = VFIO_REGION_INFO_FLAG_READ; + pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -1286,6 +1346,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) vfio_pci_set_vga_decode(vdev, false)); } + vfio_pci_probe_power_state(vdev); + if (!disable_idle_d3) { /* * pci-core sets the device power state to an unknown value at @@ -1296,8 +1358,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) * be able to get to D3. Therefore first do a D0 transition * before going to D3. 
*/ - pci_set_power_state(pdev, PCI_D0); - pci_set_power_state(pdev, PCI_D3hot); + vfio_pci_set_power_state(vdev, PCI_D0); + vfio_pci_set_power_state(vdev, PCI_D3hot); } return ret; @@ -1316,6 +1378,11 @@ static void vfio_pci_remove(struct pci_dev *pdev) vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); kfree(vdev->region); mutex_destroy(&vdev->ioeventfds_lock); + + if (!disable_idle_d3) + vfio_pci_set_power_state(vdev, PCI_D0); + + kfree(vdev->pm_save); kfree(vdev); if (vfio_pci_is_vga(pdev)) { @@ -1324,9 +1391,6 @@ static void vfio_pci_remove(struct pci_dev *pdev) VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM | VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM); } - - if (!disable_idle_d3) - pci_set_power_state(pdev, PCI_D0); } static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, @@ -1551,7 +1615,7 @@ put_devs: tmp->needs_reset = false; if (tmp != vdev && !disable_idle_d3) - pci_set_power_state(tmp->pdev, PCI_D3hot); + vfio_pci_set_power_state(tmp, PCI_D3hot); } vfio_device_put(devs.devices[i]); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 423ea1f98441..e82b51114687 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -691,7 +691,7 @@ static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos, break; } - pci_set_power_state(vdev->pdev, state); + vfio_pci_set_power_state(vdev, state); } return count; diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 8c0009f00818..1812cf22fc4f 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -114,7 +114,9 @@ struct vfio_pci_device { bool has_vga; bool needs_reset; bool nointx; + bool needs_pm_restore; struct pci_saved_state *pci_saved_state; + struct pci_saved_state *pm_save; struct vfio_pci_reflck *reflck; int refcnt; int ioeventfds_nr; @@ -161,6 +163,10 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, unsigned int type, unsigned int subtype, const struct vfio_pci_regops *ops, size_t size, u32 flags, void *data); + +extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev, + pci_power_t state); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else diff --git a/drivers/vfio/platform/reset/Makefile b/drivers/vfio/platform/reset/Makefile index 57abd4f0ac5b..7294c5ea122e 100644 --- a/drivers/vfio/platform/reset/Makefile +++ b/drivers/vfio/platform/reset/Makefile @@ -2,8 +2,6 @@ vfio-platform-calxedaxgmac-y := vfio_platform_calxedaxgmac.o vfio-platform-amdxgbe-y := vfio_platform_amdxgbe.o -ccflags-y += -Idrivers/vfio/platform - obj-$(CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET) += vfio-platform-calxedaxgmac.o obj-$(CONFIG_VFIO_PLATFORM_AMDXGBE_RESET) += vfio-platform-amdxgbe.o obj-$(CONFIG_VFIO_PLATFORM_BCMFLEXRM_RESET) += vfio_platform_bcmflexrm.o diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c index bcd419cfd79c..3ddb2704221d 100644 --- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c +++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c @@ -25,7 +25,7 @@ #include <uapi/linux/mdio.h> #include <linux/delay.h> -#include "vfio_platform_private.h" +#include "../vfio_platform_private.h" #define DMA_MR 0x3000 #define MAC_VR 0x0110 diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c index d45c3be71198..16165a62b86d 100644 --- 
a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c +++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> -#include "vfio_platform_private.h" +#include "../vfio_platform_private.h" /* FlexRM configuration */ #define RING_REGS_SIZE 0x10000 diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c index 49e5df6e8f29..e0356de5df54 100644 --- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c +++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c @@ -24,7 +24,7 @@ #include <linux/init.h> #include <linux/io.h> -#include "vfio_platform_private.h" +#include "../vfio_platform_private.h" #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Eric Auger <eric.auger@linaro.org>" diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 64833879f75d..a3030cdf3c18 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -2219,12 +2219,12 @@ static int __init vfio_init(void) vfio.class->devnode = vfio_devnode; - ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio"); + ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio"); if (ret) goto err_alloc_chrdev; cdev_init(&vfio.group_cdev, &vfio_group_fops); - ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK); + ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1); if (ret) goto err_cdev_add; @@ -2236,7 +2236,7 @@ static int __init vfio_init(void) return 0; err_cdev_add: - unregister_chrdev_region(vfio.group_devt, MINORMASK); + unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); err_alloc_chrdev: class_destroy(vfio.class); vfio.class = NULL; @@ -2254,7 +2254,7 @@ static void __exit vfio_cleanup(void) #endif idr_destroy(&vfio.group_idr); cdev_del(&vfio.group_cdev); - unregister_chrdev_region(vfio.group_devt, MINORMASK); + unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); class_destroy(vfio.class); vfio.class = NULL; misc_deregister(&vfio_dev); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index c424913324e3..8dbb270998f4 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1235,7 +1235,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, } for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) - table_group->ops->unset_window(table_group, i); + if (container->tables[i]) + table_group->ops->unset_window(table_group, i); table_group->ops->release_ownership(table_group); } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 24a129fcdd61..a2e5dc7716e2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, len, iov, 64, VHOST_ACCESS_WO); - if (ret) + if (ret < 0) return ret; for (i = 0; i < ret; i++) { diff --git a/fs/afs/cell.c b/fs/afs/cell.c index cf445dbd5f2e..9de46116c749 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, rcu_assign_pointer(cell->vl_servers, vllist); cell->dns_expiry = TIME64_MAX; + __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags); } else { cell->dns_expiry = ktime_get_real_seconds(); } @@ -167,9 +167,13 @@ struct kioctx { unsigned id; }; +/* + * First field must be the file pointer in all the + * iocb unions! 
See also 'struct kiocb' in <linux/fs.h> + */ struct fsync_iocb { - struct work_struct work; struct file *file; + struct work_struct work; bool datasync; }; @@ -183,8 +187,15 @@ struct poll_iocb { struct work_struct work; }; +/* + * NOTE! Each of the iocb union members has the file pointer + * as the first entry in their struct definition. So you can + * access the file pointer through any of the sub-structs, + * or directly as just 'ki_filp' in this struct. + */ struct aio_kiocb { union { + struct file *ki_filp; struct kiocb rw; struct fsync_iocb fsync; struct poll_iocb poll; @@ -1060,6 +1071,8 @@ static inline void iocb_put(struct aio_kiocb *iocb) { if (refcount_read(&iocb->ki_refcnt) == 0 || refcount_dec_and_test(&iocb->ki_refcnt)) { + if (iocb->ki_filp) + fput(iocb->ki_filp); percpu_ref_put(&iocb->ki_ctx->reqs); kmem_cache_free(kiocb_cachep, iocb); } @@ -1424,7 +1437,6 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) file_end_write(kiocb->ki_filp); } - fput(kiocb->ki_filp); aio_complete(iocb, res, res2); } @@ -1432,9 +1444,6 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) { int ret; - req->ki_filp = fget(iocb->aio_fildes); - if (unlikely(!req->ki_filp)) - return -EBADF; req->ki_complete = aio_complete_rw; req->private = NULL; req->ki_pos = iocb->aio_offset; @@ -1451,7 +1460,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) ret = ioprio_check_cap(iocb->aio_reqprio); if (ret) { pr_debug("aio ioprio check cap error: %d\n", ret); - goto out_fput; + return ret; } req->ki_ioprio = iocb->aio_reqprio; @@ -1460,14 +1469,10 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); if (unlikely(ret)) - goto out_fput; + return ret; req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ return 0; - -out_fput: - fput(req->ki_filp); - return ret; } static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec, @@ -1521,24 +1526,19 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, if (ret) return ret; file = req->ki_filp; - - ret = -EBADF; if (unlikely(!(file->f_mode & FMODE_READ))) - goto out_fput; + return -EBADF; ret = -EINVAL; if (unlikely(!file->f_op->read_iter)) - goto out_fput; + return -EINVAL; ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); if (ret) - goto out_fput; + return ret; ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); if (!ret) aio_rw_done(req, call_read_iter(file, req, &iter)); kfree(iovec); -out_fput: - if (unlikely(ret)) - fput(file); return ret; } @@ -1555,16 +1555,14 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, return ret; file = req->ki_filp; - ret = -EBADF; if (unlikely(!(file->f_mode & FMODE_WRITE))) - goto out_fput; - ret = -EINVAL; + return -EBADF; if (unlikely(!file->f_op->write_iter)) - goto out_fput; + return -EINVAL; ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); if (ret) - goto out_fput; + return ret; ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); if (!ret) { /* @@ -1582,9 +1580,6 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, aio_rw_done(req, call_write_iter(file, req, &iter)); } kfree(iovec); -out_fput: - if (unlikely(ret)) - fput(file); return ret; } @@ -1594,7 +1589,6 @@ static void aio_fsync_work(struct work_struct *work) int ret; ret = vfs_fsync(req->file, req->datasync); - fput(req->file); aio_complete(container_of(req, struct aio_kiocb, fsync), 
ret, 0); } @@ -1605,13 +1599,8 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, iocb->aio_rw_flags)) return -EINVAL; - req->file = fget(iocb->aio_fildes); - if (unlikely(!req->file)) - return -EBADF; - if (unlikely(!req->file->f_op->fsync)) { - fput(req->file); + if (unlikely(!req->file->f_op->fsync)) return -EINVAL; - } req->datasync = datasync; INIT_WORK(&req->work, aio_fsync_work); @@ -1621,10 +1610,7 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) { - struct file *file = iocb->poll.file; - aio_complete(iocb, mangle_poll(mask), 0); - fput(file); } static void aio_poll_complete_work(struct work_struct *work) @@ -1680,6 +1666,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); __poll_t mask = key_to_poll(key); + unsigned long flags; req->woken = true; @@ -1688,10 +1675,15 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, if (!(mask & req->events)) return 0; - /* try to complete the iocb inline if we can: */ - if (spin_trylock(&iocb->ki_ctx->ctx_lock)) { + /* + * Try to complete the iocb inline if we can. Use + * irqsave/irqrestore because not all filesystems (e.g. fuse) + * call this function with IRQs disabled and because IRQs + * have to be disabled before ctx_lock is obtained. + */ + if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { list_del(&iocb->ki_list); - spin_unlock(&iocb->ki_ctx->ctx_lock); + spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); list_del_init(&req->wait.entry); aio_poll_complete(iocb, mask); @@ -1743,9 +1735,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) INIT_WORK(&req->work, aio_poll_complete_work); req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; - req->file = fget(iocb->aio_fildes); - if (unlikely(!req->file)) - return -EBADF; req->head = NULL; req->woken = false; @@ -1788,10 +1777,8 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) spin_unlock_irq(&ctx->ctx_lock); out: - if (unlikely(apt.error)) { - fput(req->file); + if (unlikely(apt.error)) return apt.error; - } if (mask) aio_poll_complete(aiocb, mask); @@ -1829,6 +1816,11 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, if (unlikely(!req)) goto out_put_reqs_available; + req->ki_filp = fget(iocb->aio_fildes); + ret = -EBADF; + if (unlikely(!req->ki_filp)) + goto out_put_req; + if (iocb->aio_flags & IOCB_FLAG_RESFD) { /* * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index 7cde3f46ad26..e996174cbfc0 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -14,13 +14,30 @@ #include <linux/err.h> #include <linux/fs.h> +static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } +static inline char *next_non_spacetab(char *first, const char *last) +{ + for (; first <= last; first++) + if (!spacetab(*first)) + return first; + return NULL; +} +static inline char *next_terminator(char *first, const char *last) +{ + for (; first <= last; first++) + if (spacetab(*first) || !*first) + return first; + return NULL; +} + static int load_script(struct linux_binprm *bprm) { const char *i_arg, *i_name; - char *cp; + char *cp, *buf_end; struct file *file; int retval; + /* Not ours to exec if we don't start with 
"#!". */ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) return -ENOEXEC; @@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm) if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) return -ENOENT; - /* - * This section does the #! interpretation. - * Sorta complicated, but hopefully it will work. -TYT - */ - + /* Release since we are not mapping a binary into memory. */ allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; - bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; - if ((cp = strchr(bprm->buf, '\n')) == NULL) - cp = bprm->buf+BINPRM_BUF_SIZE-1; + /* + * This section handles parsing the #! line into separate + * interpreter path and argument strings. We must be careful + * because bprm->buf is not yet guaranteed to be NUL-terminated + * (though the buffer will have trailing NUL padding when the + * file size was smaller than the buffer size). + * + * We do not want to exec a truncated interpreter path, so either + * we find a newline (which indicates nothing is truncated), or + * we find a space/tab/NUL after the interpreter path (which + * itself may be preceded by spaces/tabs). Truncating the + * arguments is fine: the interpreter can re-read the script to + * parse them on its own. + */ + buf_end = bprm->buf + sizeof(bprm->buf) - 1; + cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); + if (!cp) { + cp = next_non_spacetab(bprm->buf + 2, buf_end); + if (!cp) + return -ENOEXEC; /* Entire buf is spaces/tabs */ + /* + * If there is no later space/tab/NUL we must assume the + * interpreter path is truncated. + */ + if (!next_terminator(cp, buf_end)) + return -ENOEXEC; + cp = buf_end; + } + /* NUL-terminate the buffer and any trailing spaces/tabs. */ *cp = '\0'; while (cp > bprm->buf) { cp--; diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 041c27ea8de1..f74193da0e09 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, capsnap->size); spin_lock(&mdsc->snap_flush_lock); - list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); + if (list_empty(&ci->i_snap_flush_item)) + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); spin_unlock(&mdsc->snap_flush_lock); return 1; /* caller may want to ceph_flush_snaps */ } diff --git a/fs/exec.c b/fs/exec.c index fb72d36f7823..bcf383730bea 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -932,7 +932,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size, bytes = kernel_read(file, *buf + pos, i_size - pos, &pos); if (bytes < 0) { ret = bytes; - goto out; + goto out_free; } if (bytes == 0) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 32920a10100e..a7fa037b876b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping, rc = migrate_huge_page_move_mapping(mapping, newpage, page); if (rc != MIGRATEPAGE_SUCCESS) return rc; + + /* + * page_private is subpool pointer in hugetlb pages. Transfer to + * new page. PagePrivate is not associated with page_private for + * hugetlb pages and can not be set here as only page_huge_active + * pages can be migrated. 
+ */ + if (page_private(page)) { + set_page_private(newpage, page_private(page)); + set_page_private(page, 0); + } + if (mode != MIGRATE_SYNC_NO_COPY) migrate_page_copy(newpage, page); else diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index fdf527b6d79c..d71c9405874a 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -196,8 +196,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn, return dentry; knparent = find_next_ancestor(kn, NULL); - if (WARN_ON(!knparent)) + if (WARN_ON(!knparent)) { + dput(dentry); return ERR_PTR(-EINVAL); + } do { struct dentry *dtmp; @@ -206,8 +208,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn, if (kn == knparent) return dentry; kntmp = find_next_ancestor(kn, knparent); - if (WARN_ON(!kntmp)) + if (WARN_ON(!kntmp)) { + dput(dentry); return ERR_PTR(-EINVAL); + } dtmp = lookup_one_len_unlocked(kntmp->name, dentry, strlen(kntmp->name)); dput(dentry); diff --git a/fs/namespace.c b/fs/namespace.c index a677b59efd74..c4e83d94840c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2698,7 +2698,6 @@ static long exact_copy_from_user(void *to, const void __user * from, if (!access_ok(from, n)) return n; - current->kernel_uaccess_faults_ok++; while (n) { if (__get_user(c, f)) { memset(t, 0, n); @@ -2708,7 +2707,6 @@ static long exact_copy_from_user(void *to, const void __user * from, f++; n--; } - current->kernel_uaccess_faults_ok--; return n; } @@ -2746,7 +2744,7 @@ void *copy_mount_options(const void __user * data) char *copy_mount_string(const void __user *data) { - return data ? strndup_user(data, PAGE_SIZE) : NULL; + return data ? strndup_user(data, PATH_MAX) : NULL; } /* diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 3f23b6840547..bf34ddaa2ad7 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -44,6 +44,7 @@ #include <linux/keyctl.h> #include <linux/key-type.h> #include <keys/user-type.h> +#include <keys/request_key_auth-type.h> #include <linux/module.h> #include "internal.h" @@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy; struct idmap_legacy_upcalldata { struct rpc_pipe_msg pipe_msg; struct idmap_msg idmap_msg; - struct key_construction *key_cons; + struct key *authkey; struct idmap *idmap; }; @@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = { { Opt_find_err, NULL } }; -static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); +static int nfs_idmap_legacy_upcall(struct key *, void *); static ssize_t idmap_pipe_downcall(struct file *, const char __user *, size_t); static void idmap_release_pipe(struct inode *); @@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap, static void nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) { - struct key_construction *cons = idmap->idmap_upcall_data->key_cons; + struct key *authkey = idmap->idmap_upcall_data->authkey; kfree(idmap->idmap_upcall_data); idmap->idmap_upcall_data = NULL; - complete_request_key(cons, ret); + complete_request_key(authkey, ret); + key_put(authkey); } static void @@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret) nfs_idmap_complete_pipe_upcall_locked(idmap, ret); } -static int nfs_idmap_legacy_upcall(struct key_construction *cons, - const char *op, - void *aux) +static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) { struct idmap_legacy_upcalldata *data; + struct request_key_auth *rka = get_request_key_auth(authkey); struct rpc_pipe_msg *msg; struct idmap_msg *im; struct idmap *idmap = (struct idmap 
*)aux; - struct key *key = cons->key; + struct key *key = rka->target_key; int ret = -ENOKEY; if (!aux) @@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, msg = &data->pipe_msg; im = &data->idmap_msg; data->idmap = idmap; - data->key_cons = cons; + data->authkey = key_get(authkey); ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); if (ret < 0) @@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, out2: kfree(data); out1: - complete_request_key(cons, ret); + complete_request_key(authkey, ret); return ret; } @@ -651,9 +652,10 @@ out: static ssize_t idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { + struct request_key_auth *rka; struct rpc_inode *rpci = RPC_I(file_inode(filp)); struct idmap *idmap = (struct idmap *)rpci->private; - struct key_construction *cons; + struct key *authkey; struct idmap_msg im; size_t namelen_in; int ret = -ENOKEY; @@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (idmap->idmap_upcall_data == NULL) goto out_noupcall; - cons = idmap->idmap_upcall_data->key_cons; + authkey = idmap->idmap_upcall_data->authkey; + rka = get_request_key_auth(authkey); if (mlen != sizeof(im)) { ret = -ENOSPC; @@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) ret = nfs_idmap_read_and_verify_message(&im, &idmap->idmap_upcall_data->idmap_msg, - cons->key, cons->authkey); + rka->target_key, authkey); if (ret >= 0) { - key_set_timeout(cons->key, nfs_idmap_cache_timeout); + key_set_timeout(rka->target_key, nfs_idmap_cache_timeout); ret = mlen; } diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index a5a2fe76568f..b094d3d79354 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -398,8 +398,6 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter loff_t pos = iocb->ki_pos; ssize_t rc = 0; - BUG_ON(iocb->private); - gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n"); orangefs_stats.reads++; @@ -416,8 +414,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite loff_t pos; ssize_t rc; - BUG_ON(iocb->private); - gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n"); inode_lock(file->f_mapping->host); diff --git a/fs/proc/base.c b/fs/proc/base.c index 633a63462573..f5ed9512d193 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) task_lock(p); if (!p->vfork_done && process_shares_mm(p, mm)) { - pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). 
Report if this is unexpected.\n", - task_pid_nr(p), p->comm, - p->signal->oom_score_adj, oom_adj, - task_pid_nr(task), task->comm); p->signal->oom_score_adj = oom_adj; if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) p->signal->oom_score_adj_min = (short)oom_adj; diff --git a/fs/read_write.c b/fs/read_write.c index ff3c5e6f87cf..30df848b7451 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -426,7 +426,7 @@ ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) ssize_t result; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); /* The cast to a user pointer is valid due to the set_fs() */ result = vfs_read(file, (void __user *)buf, count, pos); set_fs(old_fs); @@ -499,7 +499,7 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t return -EINVAL; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); p = (__force const char __user *)buf; if (count > MAX_RW_COUNT) count = MAX_RW_COUNT; @@ -521,7 +521,7 @@ ssize_t kernel_write(struct file *file, const void *buf, size_t count, ssize_t res; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_write(file, (__force const char __user *)buf, count, pos); set_fs(old_fs); diff --git a/fs/splice.c b/fs/splice.c index de2ede048473..6489fb9436e4 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -357,7 +357,7 @@ static ssize_t kernel_readv(struct file *file, const struct kvec *vec, ssize_t res; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0); set_fs(old_fs); @@ -1123,6 +1123,9 @@ static long do_splice(struct file *in, loff_t __user *off_in, if (ipipe == opipe) return -EINVAL; + if ((in->f_flags | out->f_flags) & O_NONBLOCK) + flags |= SPLICE_F_NONBLOCK; + return splice_pipe_to_pipe(ipipe, opipe, len, flags); } @@ -1148,6 +1151,9 @@ static long do_splice(struct file *in, loff_t __user *off_in, if (unlikely(ret < 0)) return ret; + if (in->f_flags & O_NONBLOCK) + flags |= SPLICE_F_NONBLOCK; + file_start_write(out); ret = do_splice_from(ipipe, out, &offset, len, flags); file_end_write(out); @@ -1172,6 +1178,9 @@ static long do_splice(struct file *in, loff_t __user *off_in, offset = in->f_pos; } + if (out->f_flags & O_NONBLOCK) + flags |= SPLICE_F_NONBLOCK; + pipe_lock(opipe); ret = wait_for_space(opipe, flags); if (!ret) @@ -1717,6 +1726,9 @@ static long do_tee(struct file *in, struct file *out, size_t len, * copying the data. */ if (ipipe && opipe && ipipe != opipe) { + if ((in->f_flags | out->f_flags) & O_NONBLOCK) + flags |= SPLICE_F_NONBLOCK; + /* * Keep going, unless we encounter an error. The ipipe/opipe * ordering doesn't really matter. diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index d82c78a79da5..b3d2241e03f8 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -22,7 +22,6 @@ #endif #ifndef get_fs -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h new file mode 100644 index 000000000000..a726dd3f1dc6 --- /dev/null +++ b/include/keys/request_key_auth-type.h @@ -0,0 +1,36 @@ +/* request_key authorisation token key type + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H +#define _KEYS_REQUEST_KEY_AUTH_TYPE_H + +#include <linux/key.h> + +/* + * Authorisation record for request_key(). + */ +struct request_key_auth { + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +} __randomize_layout; + +static inline struct request_key_auth *get_request_key_auth(const struct key *key) +{ + return key->payload.data[0]; +} + + +#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index e098cbe27db5..12babe991594 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -31,7 +31,7 @@ struct user_key_payload { struct rcu_head rcu; /* RCU destructor */ unsigned short datalen; /* length of this data */ - char data[0]; /* actual data */ + char data[0] __aligned(__alignof__(u64)); /* actual data */ }; extern struct key_type key_type_user; diff --git a/include/linux/fs.h b/include/linux/fs.h index 29d8e2cfed0e..fd423fec8d83 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -304,13 +304,19 @@ enum rw_hint { struct kiocb { struct file *ki_filp; + + /* The 'ki_filp' pointer is shared in a union for aio */ + randomized_struct_fields_start + loff_t ki_pos; void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); void *private; int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ -} __randomize_layout; + + randomized_struct_fields_end +}; static inline bool is_sync_kiocb(struct kiocb *kiocb) { diff --git a/include/linux/key-type.h b/include/linux/key-type.h index bc9af551fc83..e49d1de0614e 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -21,15 +21,6 @@ struct kernel_pkey_query; struct kernel_pkey_params; /* - * key under-construction record - * - passed to the request_key actor if supplied - */ -struct key_construction { - struct key *key; /* key being constructed */ - struct key *authkey;/* authorisation for key being constructed */ -}; - -/* * Pre-parsed payload, used by key add, update and instantiate. * * This struct will be cleared and data and datalen will be set with the data @@ -50,8 +41,7 @@ struct key_preparsed_payload { time64_t expiry; /* Expiry time of key */ } __randomize_layout; -typedef int (*request_key_actor_t)(struct key_construction *key, - const char *op, void *aux); +typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); /* * Preparsed matching criterion. 
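With struct key_construction removed, a request_key actor now receives the
authorisation key itself and reaches the key under construction through it.
Below is a minimal sketch of an actor under the new prototype, mirroring the
nfs_idmap conversion above; the foo_ names are hypothetical placeholders, not
kernel API.

/* Requires <linux/key-type.h> and <keys/request_key_auth-type.h>. */

/* Hypothetical helper that instantiates or rejects the target key. */
static int foo_do_upcall(const char *desc, const void *callout, void *aux);

static int foo_request_key(struct key *authkey, void *aux)
{
	struct request_key_auth *rka = get_request_key_auth(authkey);
	struct key *key = rka->target_key;	/* key under construction */
	int ret;

	ret = foo_do_upcall(key->description, rka->callout_info, aux);

	/* Report the result against the authorisation key, as before. */
	complete_request_key(authkey, ret);
	return ret;
}
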
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, - struct key *instkey); + struct key *authkey); extern int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, - struct key *instkey); -extern void complete_request_key(struct key_construction *cons, int error); + struct key *authkey); +extern void complete_request_key(struct key *authkey, int error); static inline int key_negate_and_link(struct key *key, unsigned timeout, struct key *keyring, - struct key *instkey) + struct key *authkey) { - return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); + return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); } extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); diff --git a/include/linux/mdev.h b/include/linux/mdev.h index b6e048e1045f..d7aee90e5da5 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -120,7 +120,7 @@ struct mdev_driver { extern void *mdev_get_drvdata(struct mdev_device *mdev); extern void mdev_set_drvdata(struct mdev_device *mdev, void *data); -extern uuid_le mdev_uuid(struct mdev_device *mdev); +extern const guid_t *mdev_uuid(struct mdev_device *mdev); extern struct bus_type mdev_bus_type; diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index c204d9a79436..45cdcd0fee53 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -208,6 +208,7 @@ enum tps65218_regulator_id { /* LDOs */ TPS65218_LDO_1, /* LS's */ + TPS65218_LS_2, TPS65218_LS_3, }; @@ -218,7 +219,7 @@ enum tps65218_regulator_id { /* Number of LDO voltage regulators available */ #define TPS65218_NUM_LDO 1 /* Number of total LS current regulators available */ -#define TPS65218_NUM_LS 1 +#define TPS65218_NUM_LS 2 /* Number of total regulators available */ #define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ + TPS65218_NUM_LS) diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 8ef330027b13..19566ab9decb 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -133,6 +133,8 @@ struct mmc_ext_csd { struct sd_scr { unsigned char sda_vsn; unsigned char sda_spec3; + unsigned char sda_spec4; + unsigned char sda_specx; unsigned char bus_widths; #define SD_SCR_BUS_WIDTH_1 (1<<0) #define SD_SCR_BUS_WIDTH_4 (1<<2) @@ -277,6 +279,7 @@ struct mmc_card { unsigned int erase_shift; /* if erase unit is power 2 */ unsigned int pref_erase; /* in sectors */ unsigned int eg_boundary; /* don't cross erase-group boundaries */ + unsigned int erase_arg; /* erase / trim / discard */ u8 erased_byte; /* value of erased bytes */ u32 raw_cid[4]; /* raw card CID */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4d35ff36ceff..43d0f0c496f6 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -478,6 +478,11 @@ static inline void *mmc_priv(struct mmc_host *host) return (void *)host->private; } +static inline struct mmc_host *mmc_from_priv(void *priv) +{ + return container_of(priv, struct mmc_host, private); +} + #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) #define mmc_dev(x) ((x)->parent) @@ -502,17 +507,11 @@ void sdio_run_irqs(struct mmc_host *host); void sdio_signal_irq(struct mmc_host *host); #ifdef CONFIG_REGULATOR -int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); int mmc_regulator_set_vqmmc(struct 
mmc_host *mmc, struct mmc_ios *ios); #else -static inline int mmc_regulator_get_ocrmask(struct regulator *supply) -{ - return 0; -} - static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit) @@ -527,7 +526,6 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, } #endif -u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); int mmc_regulator_get_supply(struct mmc_host *mmc); static inline int mmc_card_is_removable(struct mmc_host *host) diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 1ebcf9ba1256..ec94a5aa02bb 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -91,4 +91,10 @@ #define SD_SWITCH_ACCESS_DEF 0 #define SD_SWITCH_ACCESS_HS 1 +/* + * Erase/discard + */ +#define SD_ERASE_ARG 0x00000000 +#define SD_DISCARD_ARG 0x00000001 + #endif /* LINUX_MMC_SD_H */ diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index feebd7aa6f5c..9fd3ce64a885 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -22,7 +22,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, unsigned int debounce, bool *gpio_invert); int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, bool override_active_level, + unsigned int idx, unsigned int debounce, bool *gpio_invert); void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 33e240acdc6d..b7445a44a814 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -16,13 +16,12 @@ #ifndef __LINUX_MTD_RAWNAND_H #define __LINUX_MTD_RAWNAND_H -#include <linux/wait.h> -#include <linux/spinlock.h> #include <linux/mtd/mtd.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> #include <linux/mtd/jedec.h> #include <linux/mtd/onfi.h> +#include <linux/mutex.h> #include <linux/of.h> #include <linux/types.h> @@ -897,25 +896,17 @@ struct nand_controller_ops { /** * struct nand_controller - Structure used to describe a NAND controller * - * @lock: protection lock - * @active: the mtd device which holds the controller currently - * @wq: wait queue to sleep on if a NAND operation is in - * progress used instead of the per chip wait queue - * when a hw controller is available. + * @lock: lock used to serialize accesses to the NAND controller * @ops: NAND controller operations. */ struct nand_controller { - spinlock_t lock; - struct nand_chip *active; - wait_queue_head_t wq; + struct mutex lock; const struct nand_controller_ops *ops; }; static inline void nand_controller_init(struct nand_controller *nfc) { - nfc->active = NULL; - spin_lock_init(&nfc->lock); - init_waitqueue_head(&nfc->wq); + mutex_init(&nfc->lock); } /** @@ -936,7 +927,6 @@ static inline void nand_controller_init(struct nand_controller *nfc) * @waitfunc: hardware specific function for wait on ready. 
* @block_bad: check if a block is bad, using OOB markers * @block_markbad: mark a block bad - * @erase: erase function * @set_features: set the NAND chip features * @get_features: get the NAND chip features * @chip_delay: chip dependent delay for transferring data from array to read @@ -962,7 +952,6 @@ struct nand_legacy { int (*waitfunc)(struct nand_chip *chip); int (*block_bad)(struct nand_chip *chip, loff_t ofs); int (*block_markbad)(struct nand_chip *chip, loff_t ofs); - int (*erase)(struct nand_chip *chip, int page); int (*set_features)(struct nand_chip *chip, int feature_addr, u8 *subfeature_para); int (*get_features)(struct nand_chip *chip, int feature_addr, @@ -983,7 +972,6 @@ struct nand_legacy { * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform - * @state: [INTERN] the current state of the NAND device * @oob_poi: "poison value buffer," used for laying out OOB data * before writing * @page_shift: [INTERN] number of address bits in a page (column @@ -1034,6 +1022,9 @@ struct nand_legacy { * cur_cs < numchips. NAND Controller drivers should not * modify this value, but they're allowed to read it. * @read_retries: [INTERN] the number of read retry modes supported + * @lock: lock protecting the suspended field. Also used to + * serialize accesses to the NAND device. + * @suspended: set to 1 when the device is suspended, 0 when it's not. * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1088,7 +1079,8 @@ struct nand_chip { int read_retries; - flstate_t state; + struct mutex lock; + unsigned int suspended : 1; uint8_t *oob_poi; struct nand_controller *controller; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index fa2d89e38e40..b3d360b0ee3d 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -46,9 +46,13 @@ #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */ +#define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */ #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ +#define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */ +#define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */ #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ @@ -69,9 +73,13 @@ #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */ +#define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */ #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ #define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ +#define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */ +#define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */ #define SPINOR_OP_BE_4K_4B 0x21 /* Erase 
4KiB block */ #define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ @@ -458,7 +466,7 @@ struct spi_nor_hwcaps { /* *(Fast) Read capabilities. * MUST be ordered by priority: the higher bit position, the higher priority. - * As a matter of performances, it is relevant to use Octo SPI protocols first, + * As a matter of performances, it is relevant to use Octal SPI protocols first, * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly * (Slow) Read. */ @@ -479,7 +487,7 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_READ_4_4_4 BIT(9) #define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) -#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11) +#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) #define SNOR_HWCAPS_READ_1_1_8 BIT(11) #define SNOR_HWCAPS_READ_1_8_8 BIT(12) #define SNOR_HWCAPS_READ_8_8_8 BIT(13) @@ -488,7 +496,7 @@ struct spi_nor_hwcaps { /* * Page Program capabilities. * MUST be ordered by priority: the higher bit position, the higher priority. - * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the + * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the * legacy SPI 1-1-1 protocol. * Note that Dual Page Programs are not supported because there is no existing * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory @@ -502,7 +510,7 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_PP_1_4_4 BIT(18) #define SNOR_HWCAPS_PP_4_4_4 BIT(19) -#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20) +#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) #define SNOR_HWCAPS_PP_1_1_8 BIT(20) #define SNOR_HWCAPS_PP_1_8_8 BIT(21) #define SNOR_HWCAPS_PP_8_8_8 BIT(22) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2b2a6dce1630..4c76fe2c8488 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -11,6 +11,8 @@ #define _LINUX_NETDEV_FEATURES_H #include <linux/types.h> +#include <linux/bitops.h> +#include <asm/byteorder.h> typedef u64 netdev_features_t; @@ -154,8 +156,26 @@ enum { #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) -#define for_each_netdev_feature(mask_addr, bit) \ - for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) +/* Finds the next feature with the highest number of the range of start till 0. + */ +static inline int find_next_netdev_feature(u64 feature, unsigned long start) +{ + /* like BITMAP_LAST_WORD_MASK() for u64 + * this sets the most significant 64 - start to 0. 
+ */ + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); + + return fls64(feature) - 1; +} + +/* This goes for the MSB to the LSB through the set feature bits, + * mask_addr should be a u64 and bit an int + */ +#define for_each_netdev_feature(mask_addr, bit) \ + for ((bit) = find_next_netdev_feature((mask_addr), \ + NETDEV_FEATURE_COUNT); \ + (bit) >= 0; \ + (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 86dbb3e29139..848b54b7ec91 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3861,7 +3861,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) if (debug_value == 0) /* no output */ return 0; /* set low N bits */ - return (1 << debug_value) - 1; + return (1U << debug_value) - 1; } static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) diff --git a/include/linux/phy.h b/include/linux/phy.h index 127fcc9c3778..333b56d8f746 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -992,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; } +static inline int genphy_no_ack_interrupt(struct phy_device *phydev) +{ + return 0; +} +static inline int genphy_no_config_intr(struct phy_device *phydev) +{ + return 0; +} int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum); int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, diff --git a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h b/include/linux/platform_data/spi-ath79.h index aa71216edf99..aa71216edf99 100644 --- a/arch/mips/include/asm/mach-ath79/ath79_spi_platform.h +++ b/include/linux/platform_data/spi-ath79.h diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 1781b6cb793c..daeec7dbd65c 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1131,11 +1131,37 @@ struct regmap_irq { .reg_offset = (_id) / (_reg_bits), \ } +#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \ + { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] } + +struct regmap_irq_sub_irq_map { + unsigned int num_regs; + unsigned int *offset; +}; + /** * struct regmap_irq_chip - Description of a generic regmap irq_chip. * * @name: Descriptive name for IRQ controller. * + * @main_status: Base main status register address. For chips which have + * interrupts arranged in separate sub-irq blocks with own IRQ + * registers and which have a main IRQ registers indicating + * sub-irq blocks with unhandled interrupts. For such chips fill + * sub-irq register information in status_base, mask_base and + * ack_base. + * @num_main_status_bits: Should be given to chips where number of meaningfull + * main status bits differs from num_regs. + * @sub_reg_offsets: arrays of mappings from main register bits to sub irq + * registers. First item in array describes the registers + * for first main status bit. Second array for second bit etc. + * Offset is given as sub register status offset to + * status_base. Should contain num_regs arrays. + * Can be provided for chips with more complex mapping than + * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ... + * @num_main_regs: Number of 'main status' irq registers for chips which have + * main_status set. + * * @status_base: Base status register address. * @mask_base: Base mask register address. * @mask_writeonly: Base mask register is write only. 
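For chips whose interrupts sit in sub-irq blocks behind a main status
register, the new fields describe how each main status bit maps to sub-irq
status registers. A minimal sketch of such a description using the
REGMAP_IRQ_MAIN_REG_OFFSET() helper; the foo_ names, register addresses and
bit layout are hypothetical.

/* Main status bit 0 -> one sub-register at offset 0 from status_base,
 * main status bit 1 -> sub-registers at offsets 1 and 2.
 */
static unsigned int foo_sub0_offsets[] = { 0 };
static unsigned int foo_sub1_offsets[] = { 1, 2 };

static struct regmap_irq_sub_irq_map foo_sub_irq_offsets[] = {
	REGMAP_IRQ_MAIN_REG_OFFSET(foo_sub0_offsets),
	REGMAP_IRQ_MAIN_REG_OFFSET(foo_sub1_offsets),
};

static struct regmap_irq_chip foo_irq_chip = {
	.name			= "foo",
	.main_status		= 0x00,	/* hypothetical addresses */
	.num_main_regs		= 1,
	.sub_reg_offsets	= foo_sub_irq_offsets,
	.status_base		= 0x10,
	.mask_base		= 0x20,
	.ack_base		= 0x30,
	/* .irqs, .num_irqs, .num_regs as for any other regmap irq_chip */
};
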
@@ -1181,6 +1207,11 @@ struct regmap_irq { struct regmap_irq_chip { const char *name; + unsigned int main_status; + unsigned int num_main_status_bits; + struct regmap_irq_sub_irq_map *sub_reg_offsets; + int num_main_regs; + unsigned int status_base; unsigned int mask_base; unsigned int unmask_base; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 389bcaf7900f..377da2357118 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -264,6 +264,7 @@ enum regulator_type { * @continuous_voltage_range: Indicates if the regulator can set any * voltage within constrains range. * @n_voltages: Number of selectors available for ops.list_voltage(). + * @n_current_limits: Number of selectors available for current limits * * @min_uV: Voltage given by the lowest selector (if linear mapping) * @uV_step: Voltage increase with each selector (if linear mapping) @@ -278,14 +279,15 @@ enum regulator_type { * @n_linear_ranges: Number of entries in the @linear_ranges (and in * linear_range_selectors if used) table(s). * @volt_table: Voltage mapping table (if table based mapping) + * @curr_table: Current limit mapping table (if table based mapping) * * @vsel_range_reg: Register for range selector when using pickable ranges * and regulator_regmap_X_voltage_X_pickable functions. * @vsel_range_mask: Mask for register bitfield used for range selector * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector - * @csel_reg: Register for TPS65218 LS3 current regulator - * @csel_mask: Mask for TPS65218 LS3 current regulator + * @csel_reg: Register for current limit selector using regmap set_current_limit + * @csel_mask: Mask for register bitfield used for current limit selector * @apply_reg: Register for initiate voltage change on the output when * using regulator_set_voltage_sel_regmap * @apply_bit: Register bitfield used for initiate voltage change on the @@ -333,6 +335,7 @@ struct regulator_desc { int id; unsigned int continuous_voltage_range:1; unsigned n_voltages; + unsigned int n_current_limits; const struct regulator_ops *ops; int irq; enum regulator_type type; @@ -351,6 +354,7 @@ struct regulator_desc { int n_linear_ranges; const unsigned int *volt_table; + const unsigned int *curr_table; unsigned int vsel_range_reg; unsigned int vsel_range_mask; @@ -401,13 +405,7 @@ struct regulator_desc { * NULL). * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is * insufficient. - * @ena_gpio_initialized: GPIO controlling regulator enable was properly - * initialized, meaning that >= 0 is a valid gpio - * identifier and < 0 is a non existent gpio. - * @ena_gpio: GPIO controlling regulator enable. - * @ena_gpiod: GPIO descriptor controlling regulator enable. - * @ena_gpio_invert: Sense for GPIO enable control. - * @ena_gpio_flags: Flags to use when calling gpio_request_one() + * @ena_gpiod: GPIO controlling regulator enable. 
*/ struct regulator_config { struct device *dev; @@ -416,11 +414,7 @@ struct regulator_config { struct device_node *of_node; struct regmap *regmap; - bool ena_gpio_initialized; - int ena_gpio; struct gpio_desc *ena_gpiod; - unsigned int ena_gpio_invert:1; - unsigned int ena_gpio_flags; }; /* @@ -503,6 +497,7 @@ int regulator_notifier_call_chain(struct regulator_dev *rdev, void *rdev_get_drvdata(struct regulator_dev *rdev); struct device *rdev_get_dev(struct regulator_dev *rdev); +struct regmap *rdev_get_regmap(struct regulator_dev *rdev); int rdev_get_id(struct regulator_dev *rdev); int regulator_mode_to_status(unsigned int); @@ -543,9 +538,18 @@ int regulator_set_pull_down_regmap(struct regulator_dev *rdev); int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, bool enable); +int regulator_set_current_limit_regmap(struct regulator_dev *rdev, + int min_uA, int max_uA); +int regulator_get_current_limit_regmap(struct regulator_dev *rdev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); void regulator_lock(struct regulator_dev *rdev); void regulator_unlock(struct regulator_dev *rdev); +/* + * Helper functions intended to be used by regulator drivers prior registering + * their regulators. + */ +int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, + unsigned int selector); #endif diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1a4340ed8e2b..f10140da7145 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -25,14 +25,6 @@ struct regulator_init_data; * @input_supply: Name of the input regulator supply * @microvolts: Output voltage of regulator * @startup_delay: Start-up time in microseconds - * @gpio_is_open_drain: Gpio pin is open drain or normal type. - * If it is open drain type then HIGH will be set - * through PULL-UP with setting gpio as input - * and low will be set as gpio-output with driven - * to low. For non-open-drain case, the gpio will - * will be in output and drive to low/high accordingly. - * @enable_high: Polarity of enable GPIO - * 1 = Active high, 0 = Active low * @enabled_at_boot: Whether regulator has been enabled at * boot or not. 1 = Yes, 0 = No * This is used to keep the regulator at @@ -48,8 +40,6 @@ struct fixed_voltage_config { const char *input_supply; int microvolts; unsigned startup_delay; - unsigned gpio_is_open_drain:1; - unsigned enable_high:1; unsigned enabled_at_boot:1; struct regulator_init_data *init_data; }; diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h index 19fbd267406d..11cd6375215d 100644 --- a/include/linux/regulator/gpio-regulator.h +++ b/include/linux/regulator/gpio-regulator.h @@ -21,6 +21,8 @@ #ifndef __REGULATOR_GPIO_H #define __REGULATOR_GPIO_H +#include <linux/gpio/consumer.h> + struct regulator_init_data; enum regulator_type; @@ -44,18 +46,14 @@ struct gpio_regulator_state { /** * struct gpio_regulator_config - config structure * @supply_name: Name of the regulator supply - * @enable_gpio: GPIO to use for enable control - * set to -EINVAL if not used - * @enable_high: Polarity of enable GPIO - * 1 = Active high, 0 = Active low * @enabled_at_boot: Whether regulator has been enabled at * boot or not. 
1 = Yes, 0 = No * This is used to keep the regulator at * the default state * @startup_delay: Start-up time in microseconds - * @gpios: Array containing the gpios needed to control - * the setting of the regulator - * @nr_gpios: Number of gpios + * @gflags: Array of GPIO configuration flags for initial + * states + * @ngpios: Number of GPIOs and configurations available * @states: Array of gpio_regulator_state entries describing * the gpio state for specific voltages * @nr_states: Number of states available @@ -69,13 +67,11 @@ struct gpio_regulator_state { struct gpio_regulator_config { const char *supply_name; - int enable_gpio; - unsigned enable_high:1; unsigned enabled_at_boot:1; unsigned startup_delay; - struct gpio *gpios; - int nr_gpios; + enum gpiod_flags *gflags; + int ngpios; struct gpio_regulator_state *states; int nr_states; diff --git a/include/linux/sched.h b/include/linux/sched.h index bba3afb4e9bf..f9b43c989577 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -739,12 +739,6 @@ struct task_struct { unsigned use_memdelay:1; #endif - /* - * May usercopy functions fault on kernel addresses? - * This is not just a single bit because this can potentially nest. - */ - unsigned int kernel_uaccess_faults_ok; - unsigned long atomic_flags; /* Flags requiring atomic access. */ struct restart_block restart_block; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 95d25b010a25..bdb9563c64a0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) skb_set_transport_header(skb, keys.control.thoff); - else + else if (offset_hint >= 0) skb_set_transport_header(skb, offset_hint); } @@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; } +static inline bool skb_is_gso_tcp(const struct sk_buff *skb) +{ + return skb_is_gso(skb) && + skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); +} + static inline void skb_gso_reset(struct sk_buff *skb) { skb_shinfo(skb)->gso_size = 0; diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index b0674e330ef6..c1c59473cef9 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -22,7 +22,7 @@ struct dma_chan; /* device.platform_data for SSP controller devices */ -struct pxa2xx_spi_master { +struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; bool is_slave; @@ -54,7 +54,7 @@ struct pxa2xx_spi_chip { #include <linux/clk.h> -extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); +extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); #endif #endif diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 3fe24500c5ee..3703d0dcac2e 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -330,6 +330,11 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf); ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, const void *buf); +struct spi_mem_dirmap_desc * +devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, + const struct spi_mem_dirmap_info *info); +void devm_spi_mem_dirmap_destroy(struct device *dev, + struct spi_mem_dirmap_desc *desc); int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, struct module *owner); diff --git 
a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 314d922ca607..662b336aa2e4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -12,6 +12,7 @@ #include <linux/kthread.h> #include <linux/completion.h> #include <linux/scatterlist.h> +#include <linux/gpio/consumer.h> struct dma_chan; struct property_entry; @@ -116,8 +117,13 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, * @modalias: Name of the driver to use with this device, or an alias * for that name. This appears in the sysfs "modalias" attribute * for driver coldplugging, and in uevents used for hotplugging - * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when + * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when + * not using a GPIO line) use cs_gpiod in new drivers by opting in on + * the spi_master. + * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when * not using a GPIO line) + * @word_delay_usecs: microsecond delay to be inserted between consecutive + * words of a transfer * * @statistics: statistics for the spi_device * @@ -163,7 +169,9 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - int cs_gpio; /* chip select gpio */ + int cs_gpio; /* LEGACY: chip select gpio */ + struct gpio_desc *cs_gpiod; /* chip select gpio desc */ + uint8_t word_delay_usecs; /* inter-word delay */ /* the statistics */ struct spi_statistics statistics; @@ -376,9 +384,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS - * number. Any individual value may be -ENOENT for CS lines that + * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per + * CS number. Any individual value may be -ENOENT for CS lines that + * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods + * in new drivers. + * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS + * number. Any individual value may be NULL for CS lines that * are not GPIOs (driven by the SPI controller itself). + * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab + * GPIO descriptors rather than using global GPIO numbers grabbed by the + * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, + * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. * @statistics: statistics for the spi_controller * @dma_tx: DMA transmit channel * @dma_rx: DMA receive channel @@ -557,6 +573,8 @@ struct spi_controller { /* gpio chip select */ int *cs_gpios; + struct gpio_desc **cs_gpiods; + bool use_gpio_descriptors; /* statistics */ struct spi_statistics statistics; @@ -706,6 +724,8 @@ extern void spi_res_release(struct spi_controller *ctlr, * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. + * @word_delay_usecs: microseconds to inter word delay after each word size + * (set by bits_per_word) transmission. * @word_delay: clock cycles to inter word delay after each word size * (set by bits_per_word) transmission. 
* @transfer_list: transfers are sequenced through @spi_message.transfers @@ -788,6 +808,7 @@ struct spi_transfer { #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ u8 bits_per_word; + u8 word_delay_usecs; u16 delay_usecs; u32 speed_hz; u16 word_delay; diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index cb462f9ab7dd..e0348cb0a1dd 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + } else { + /* gso packets without NEEDS_CSUM do not set transport_offset. + * probe and drop if does not match one of the above types. + */ + if (gso_type && skb->network_header) { + if (!skb->protocol) + virtio_net_hdr_set_proto(skb, hdr); +retry: + skb_probe_transport_header(skb, -1); + if (!skb_transport_header_was_set(skb)) { + /* UFO does not specify ipv4 or 6: try both */ + if (gso_type & SKB_GSO_UDP && + skb->protocol == htons(ETH_P_IP)) { + skb->protocol = htons(ETH_P_IPV6); + goto retry; + } + return -EINVAL; + } + } } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { diff --git a/include/net/icmp.h b/include/net/icmp.h index 6ac3a5bd0117..e0f709d26dde 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -22,6 +22,7 @@ #include <net/inet_sock.h> #include <net/snmp.h> +#include <net/ip.h> struct icmp_err { int errno; @@ -39,7 +40,13 @@ struct net_proto_family; struct sk_buff; struct net; -void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); +void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + const struct ip_options *opt); +static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) +{ + __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt); +} + int icmp_rcv(struct sk_buff *skb); int icmp_err(struct sk_buff *skb, u32 info); int icmp_init(void); diff --git a/include/net/ip.h b/include/net/ip.h index 8866bfce6121..be3cad9c2e4c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -667,6 +667,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt, } void ip_options_fragment(struct sk_buff *skb); +int __ip_options_compile(struct net *net, struct ip_options *opt, + struct sk_buff *skb, __be32 *info); int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb); int ip_options_get(struct net *net, struct ip_options_rcu **optp, @@ -716,7 +718,7 @@ extern int sysctl_icmp_msgs_burst; int ip_misc_proc_init(void); #endif -int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, +int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family, struct netlink_ext_ack *extack); #endif /* _IP_H */ diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h index b669fe6dbc3b..98f31c7ea23d 100644 --- a/include/net/phonet/pep.h +++ b/include/net/phonet/pep.h @@ -63,10 +63,11 @@ struct pnpipehdr { u8 state_after_reset; /* reset request */ u8 error_code; /* any response */ u8 pep_type; /* status indication */ - u8 data[1]; + u8 data0; /* anything else */ }; + u8 data[]; }; -#define other_pep_type data[1] +#define other_pep_type data[0] static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) { diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 7298a53b9702..85386becbaea 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) 
xfrm_pol_put(pols[i]); } -void __xfrm_state_destroy(struct xfrm_state *); +void __xfrm_state_destroy(struct xfrm_state *, bool); static inline void __xfrm_state_put(struct xfrm_state *x) { @@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x) static inline void xfrm_state_put(struct xfrm_state *x) { if (refcount_dec_and_test(&x->refcnt)) - __xfrm_state_destroy(x); + __xfrm_state_destroy(x, false); +} + +static inline void xfrm_state_put_sync(struct xfrm_state *x) +{ + if (refcount_dec_and_test(&x->refcnt)) + __xfrm_state_destroy(x, true); } static inline void xfrm_state_hold(struct xfrm_state *x) @@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo { struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); int xfrm_state_delete(struct xfrm_state *x); -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync); int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h index 277bb9d25779..aef6869f563d 100644 --- a/include/trace/events/spi.h +++ b/include/trace/events/spi.h @@ -109,6 +109,16 @@ TRACE_EVENT(spi_message_done, (unsigned)__entry->actual, (unsigned)__entry->frame) ); +/* + * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer + * that only exist to work with controllers that have SPI_CONTROLLER_MUST_TX or + * SPI_CONTROLLER_MUST_RX. + */ +#define spi_valid_txbuf(msg, xfer) \ + (xfer->tx_buf && xfer->tx_buf != msg->spi->controller->dummy_tx) +#define spi_valid_rxbuf(msg, xfer) \ + (xfer->rx_buf && xfer->rx_buf != msg->spi->controller->dummy_rx) + DECLARE_EVENT_CLASS(spi_transfer, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), @@ -120,6 +130,10 @@ DECLARE_EVENT_CLASS(spi_transfer, __field( int, chip_select ) __field( struct spi_transfer *, xfer ) __field( int, len ) + __dynamic_array(u8, rx_buf, + spi_valid_rxbuf(msg, xfer) ? xfer->len : 0) + __dynamic_array(u8, tx_buf, + spi_valid_txbuf(msg, xfer) ? 
xfer->len : 0) ), TP_fast_assign( @@ -127,12 +141,21 @@ DECLARE_EVENT_CLASS(spi_transfer, __entry->chip_select = msg->spi->chip_select; __entry->xfer = xfer; __entry->len = xfer->len; + + if (spi_valid_txbuf(msg, xfer)) + memcpy(__get_dynamic_array(tx_buf), + xfer->tx_buf, xfer->len); + + if (spi_valid_rxbuf(msg, xfer)) + memcpy(__get_dynamic_array(rx_buf), + xfer->rx_buf, xfer->len); ), - TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num, - (int)__entry->chip_select, - (struct spi_message *)__entry->xfer, - (int)__entry->len) + TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]", + __entry->bus_num, __entry->chip_select, + __entry->xfer, __entry->len, + __get_dynamic_array_len(tx_buf), __get_dynamic_array(tx_buf), + __get_dynamic_array_len(rx_buf), __get_dynamic_array(rx_buf)) ); DEFINE_EVENT(spi_transfer, spi_transfer_start, diff --git a/init/initramfs.c b/init/initramfs.c index 7cea802d00ef..fca899622937 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -550,6 +550,7 @@ skip: initrd_end = 0; } +#ifdef CONFIG_BLK_DEV_RAM #define BUF_SIZE 1024 static void __init clean_rootfs(void) { @@ -596,6 +597,7 @@ static void __init clean_rootfs(void) ksys_close(fd); kfree(buf); } +#endif static int __init populate_rootfs(void) { @@ -638,10 +640,8 @@ static int __init populate_rootfs(void) printk(KERN_INFO "Unpacking initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start); - if (err) { + if (err) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); - clean_rootfs(); - } free_initrd(); #endif } diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index abf1002080df..93a5cbbde421 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key) } if (!node || node->prefixlen != key->prefixlen || + node->prefixlen != matchlen || (node->flags & LPM_TREE_NODE_FLAG_IM)) { ret = -ENOENT; goto out; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d43b14535827..950ab2f28922 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry) struct stack_map_irq_work *work; work = container_of(entry, struct stack_map_irq_work, irq_work); - up_read(work->sem); + up_read_non_owner(work->sem); work->sem = NULL; } @@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } else { work->sem = ¤t->mm->mmap_sem; irq_work_queue(&work->irq_work); + /* + * The irq_work will release the mmap_sem with + * up_read_non_owner(). The rwsem_release() is called + * here to release the lock from lockdep's perspective. + */ + rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_); } } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8577bb7f8be6..84470d1480aa 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -559,12 +559,12 @@ static int map_create(union bpf_attr *attr) err = bpf_map_new_fd(map, f_flags); if (err < 0) { /* failed to allocate fd. - * bpf_map_put() is needed because the above + * bpf_map_put_with_uref() is needed because the above * bpf_map_alloc_id() has published the map * to the userspace and the userspace may * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 
*/ - bpf_map_put(map); + bpf_map_put_with_uref(map); return err; } @@ -1986,7 +1986,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr) fd = bpf_map_new_fd(map, f_flags); if (fd < 0) - bpf_map_put(map); + bpf_map_put_with_uref(map); return fd; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 56674a7c3778..5fcce2f4209d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off, return 0; } -static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, - int size, enum bpf_access_type t) +static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, + u32 regno, int off, int size, + enum bpf_access_type t) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = ®s[regno]; - struct bpf_insn_access_aux info; + struct bpf_insn_access_aux info = {}; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", @@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, return -EACCES; } + env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; + return 0; } @@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn verbose(env, "cannot write into socket\n"); return -EACCES; } - err = check_sock_access(env, regno, off, size, t); + err = check_sock_access(env, insn_idx, regno, off, size, t); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { @@ -6917,7 +6920,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) u32 off_reg; aux = &env->insn_aux_data[i + delta]; - if (!aux->alu_state) + if (!aux->alu_state || + aux->alu_state == BPF_ALU_NON_POINTER) continue; isneg = aux->alu_state & BPF_ALU_NEG_VALUE; diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index c950864016e2..c9a35f09e4b9 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -198,7 +198,7 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, void cgroup_free_root(struct cgroup_root *root); void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts); -int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags); +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask); int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, struct cgroup_root *root, unsigned long magic, diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 583b969b0c0e..f94a7229974e 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -1116,13 +1116,11 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, void *data, unsigned long magic, struct cgroup_namespace *ns) { - struct super_block *pinned_sb = NULL; struct cgroup_sb_opts opts; struct cgroup_root *root; struct cgroup_subsys *ss; struct dentry *dentry; int i, ret; - bool new_root = false; cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); @@ -1184,29 +1182,6 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, if (root->flags ^ opts.flags) pr_warn("new mount options do not match the existing superblock, will be ignored\n"); - /* - * We want to reuse @root whose lifetime is governed by its - * ->cgrp. 
Let's check whether @root is alive and keep it - * that way. As cgroup_kill_sb() can happen anytime, we - * want to block it by pinning the sb so that @root doesn't - * get killed before mount is complete. - * - * With the sb pinned, tryget_live can reliably indicate - * whether @root can be reused. If it's being killed, - * drain it. We can use wait_queue for the wait but this - * path is super cold. Let's just sleep a bit and retry. - */ - pinned_sb = kernfs_pin_sb(root->kf_root, NULL); - if (IS_ERR(pinned_sb) || - !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { - mutex_unlock(&cgroup_mutex); - if (!IS_ERR_OR_NULL(pinned_sb)) - deactivate_super(pinned_sb); - msleep(10); - ret = restart_syscall(); - goto out_free; - } - ret = 0; goto out_unlock; } @@ -1232,15 +1207,20 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, ret = -ENOMEM; goto out_unlock; } - new_root = true; init_cgroup_root(root, &opts); - ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD); + ret = cgroup_setup_root(root, opts.subsys_mask); if (ret) cgroup_free_root(root); out_unlock: + if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { + mutex_unlock(&cgroup_mutex); + msleep(10); + ret = restart_syscall(); + goto out_free; + } mutex_unlock(&cgroup_mutex); out_free: kfree(opts.release_agent); @@ -1252,25 +1232,13 @@ out_free: dentry = cgroup_do_mount(&cgroup_fs_type, flags, root, CGROUP_SUPER_MAGIC, ns); - /* - * There's a race window after we release cgroup_mutex and before - * allocating a superblock. Make sure a concurrent process won't - * be able to re-use the root during this window by delaying the - * initialization of root refcnt. - */ - if (new_root) { - mutex_lock(&cgroup_mutex); - percpu_ref_reinit(&root->cgrp.self.refcnt); - mutex_unlock(&cgroup_mutex); + if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) { + struct super_block *sb = dentry->d_sb; + dput(dentry); + deactivate_locked_super(sb); + msleep(10); + dentry = ERR_PTR(restart_syscall()); } - - /* - * If @pinned_sb, we're reusing an existing root and holding an - * extra ref on its sb. Mount is complete. Put the extra ref. 
- */ - if (pinned_sb) - deactivate_super(pinned_sb); - return dentry; } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index f31bd61c9466..7fd9f22e406d 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1927,7 +1927,7 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); } -int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) { LIST_HEAD(tmp_links); struct cgroup *root_cgrp = &root->cgrp; @@ -1944,7 +1944,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) root_cgrp->ancestor_ids[0] = ret; ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, - ref_flags, GFP_KERNEL); + 0, GFP_KERNEL); if (ret) goto out; @@ -2033,7 +2033,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, struct cgroup_namespace *ns) { struct dentry *dentry; - bool new_sb; + bool new_sb = false; dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb); @@ -2043,6 +2043,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, */ if (!IS_ERR(dentry) && ns != &init_cgroup_ns) { struct dentry *nsdentry; + struct super_block *sb = dentry->d_sb; struct cgroup *cgrp; mutex_lock(&cgroup_mutex); @@ -2053,12 +2054,14 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, spin_unlock_irq(&css_set_lock); mutex_unlock(&cgroup_mutex); - nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb); + nsdentry = kernfs_node_dentry(cgrp->kn, sb); dput(dentry); + if (IS_ERR(nsdentry)) + deactivate_locked_super(sb); dentry = nsdentry; } - if (IS_ERR(dentry) || !new_sb) + if (!new_sb) cgroup_put(&root->cgrp); return dentry; @@ -2118,18 +2121,16 @@ static void cgroup_kill_sb(struct super_block *sb) struct cgroup_root *root = cgroup_root_from_kf(kf_root); /* - * If @root doesn't have any mounts or children, start killing it. + * If @root doesn't have any children, start killing it. * This prevents new mounts by disabling percpu_ref_tryget_live(). * cgroup_mount() may wait for @root's release. * * And don't kill the default root. 
*/ - if (!list_empty(&root->cgrp.self.children) || - root == &cgrp_dfl_root) - cgroup_put(&root->cgrp); - else + if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && + !percpu_ref_is_dying(&root->cgrp.self.refcnt)) percpu_ref_kill(&root->cgrp.self.refcnt); - + cgroup_put(&root->cgrp); kernfs_kill_sb(sb); } @@ -5399,7 +5400,7 @@ int __init cgroup_init(void) hash_add(css_set_table, &init_css_set.hlist, css_set_hash(init_css_set.subsys)); - BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0)); + BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); mutex_unlock(&cgroup_mutex); diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index c3484785b179..0e97ca9306ef 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group) expires = group->next_update; if (now < expires) goto out; - if (now - expires > psi_period) + if (now - expires >= psi_period) missed_periods = div_u64(now - expires, psi_period); /* diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c521b7347482..c4238b441624 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file const char tgid_space[] = " "; const char space[] = " "; + print_event_info(buf, m); + seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d5fb09ebba8b..9eaf07f99212 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = { static nokprobe_inline int fetch_store_strlen(unsigned long addr) { - mm_segment_t old_fs; int ret, len = 0; u8 c; - old_fs = get_fs(); - set_fs(KERNEL_DS); - pagefault_disable(); - do { - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); + ret = probe_mem_read(&c, (u8 *)addr + len, 1); len++; } while (c && ret == 0 && len < MAX_STRING_SIZE); - pagefault_enable(); - set_fs(old_fs); - return (ret < 0) ? ret : len; } diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index d8c474b6691e..9737059ec58b 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -113,6 +113,28 @@ config KASAN_INLINE endchoice +config KASAN_STACK_ENABLE + bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST + default !(CLANG_VERSION < 90000) + depends on KASAN + help + The LLVM stack address sanitizer has a know problem that + causes excessive stack usage in a lot of functions, see + https://bugs.llvm.org/show_bug.cgi?id=38809 + Disabling asan-stack makes it safe to run kernels build + with clang-8 with KASAN enabled, though it loses some of + the functionality. + This feature is always disabled when compile-testing with clang-8 + or earlier to avoid cluttering the output in stack overflow + warnings, but clang-8 users can still enable it for builds without + CONFIG_COMPILE_TEST. On gcc and later clang versions it is + assumed to always be safe to use and enabled by default. 
+ +config KASAN_STACK + int + default 1 if KASAN_STACK_ENABLE || CC_IS_GCC + default 0 + config KASAN_S390_4_LEVEL_PAGING bool "KASan: use 4-level paging" depends on KASAN && S390 diff --git a/lib/assoc_array.c b/lib/assoc_array.c index c6659cb37033..59875eb278ea 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -768,9 +768,11 @@ all_leaves_cluster_together: new_s0->index_key[i] = ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); - blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); - pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); - new_s0->index_key[keylen - 1] &= ~blank; + if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) { + blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); + pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); + new_s0->index_key[keylen - 1] &= ~blank; + } /* This now reduces to a node splitting exercise for which we'll need * to regenerate the disparity table. diff --git a/mm/debug.c b/mm/debug.c index 0abb987dad9b..1611cf00a137 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = { void __dump_page(struct page *page, const char *reason) { - struct address_space *mapping = page_mapping(page); + struct address_space *mapping; bool page_poisoned = PagePoisoned(page); int mapcount; @@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason) goto hex_only; } + mapping = page_mapping(page); + /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to diff --git a/mm/hugetlb.c b/mm/hugetlb.c index afef61656c1e..8dfdffc34a99 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3624,7 +3624,6 @@ retry_avoidcopy: copy_user_huge_page(new_page, old_page, address, vma, pages_per_huge_page(h)); __SetPageUptodate(new_page); - set_page_huge_active(new_page); mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h)); mmu_notifier_invalidate_range_start(&range); @@ -3645,6 +3644,7 @@ retry_avoidcopy: make_huge_pte(vma, new_page, 1)); page_remove_rmap(old_page, true); hugepage_add_new_anon_rmap(new_page, vma, haddr); + set_page_huge_active(new_page); /* Make the old page be freed below */ new_page = old_page; } @@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, pte_t new_pte; spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); + bool new_page = false; /* * Currently, we are forced to kill the process in the event the @@ -3790,7 +3791,7 @@ retry: } clear_huge_page(page, address, pages_per_huge_page(h)); __SetPageUptodate(page); - set_page_huge_active(page); + new_page = true; if (vma->vm_flags & VM_MAYSHARE) { int err = huge_add_to_page_cache(page, mapping, idx); @@ -3861,6 +3862,15 @@ retry: } spin_unlock(ptl); + + /* + * Only make newly allocated pages active. Existing pages found + * in the pagecache could be !page_huge_active() if they have been + * isolated for migration. + */ + if (new_page) + set_page_huge_active(page); + unlock_page(page); out: return ret; @@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, * the set_pte_at() write. 
*/ __SetPageUptodate(page); - set_page_huge_active(page); mapping = dst_vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, dst_vma, dst_addr); @@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); + set_page_huge_active(page); if (vm_shared) unlock_page(page); ret = 0; diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index e2bb06c1b45e..5d1065efbd47 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_generic.o = -pg +CFLAGS_REMOVE_tags.o = -pg + # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 73c9cbfdedf4..09b534fbba17 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) * get different tags. */ static u8 assign_tag(struct kmem_cache *cache, const void *object, - bool init, bool krealloc) + bool init, bool keep_tag) { - /* Reuse the same tag for krealloc'ed objects. */ - if (krealloc) + /* + * 1. When an object is kmalloc()'ed, two hooks are called: + * kasan_slab_alloc() and kasan_kmalloc(). We assign the + * tag only in the first one. + * 2. We reuse the same tag for krealloc'ed objects. + */ + if (keep_tag) return get_tag(object); /* @@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, return (void *)object; } -void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, - gfp_t flags) -{ - return kasan_kmalloc(cache, object, cache->object_size, flags); -} - static inline bool shadow_invalid(u8 tag, s8 shadow_byte) { if (IS_ENABLED(CONFIG_KASAN_GENERIC)) @@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) } static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, - size_t size, gfp_t flags, bool krealloc) + size_t size, gfp_t flags, bool keep_tag) { unsigned long redzone_start; unsigned long redzone_end; @@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, KASAN_SHADOW_SCALE_SIZE); if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) - tag = assign_tag(cache, object, false, krealloc); + tag = assign_tag(cache, object, false, keep_tag); /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ kasan_unpoison_shadow(set_tag(object, tag), size); @@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, return set_tag(object, tag); } +void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, + gfp_t flags) +{ + return __kasan_kmalloc(cache, object, cache->object_size, flags, false); +} + void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags) { - return __kasan_kmalloc(cache, object, size, flags, false); + return __kasan_kmalloc(cache, object, size, flags, true); } EXPORT_SYMBOL(kasan_kmalloc); diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 0777649e07c4..63fca3172659 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -46,7 +46,7 @@ void kasan_init_tags(void) int cpu; for_each_possible_cpu(cpu) - per_cpu(prng_state, cpu) = get_random_u32(); + per_cpu(prng_state, cpu) = (u32)get_cycles(); } /* diff --git a/mm/kmemleak.c b/mm/kmemleak.c index f9d9dc250428..707fa5579f66 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -574,6 +574,7 
@@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, unsigned long flags; struct kmemleak_object *object, *parent; struct rb_node **link, *rb_parent; + unsigned long untagged_ptr; object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); if (!object) { @@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, write_lock_irqsave(&kmemleak_lock, flags); - min_addr = min(min_addr, ptr); - max_addr = max(max_addr, ptr + size); + untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); + min_addr = min(min_addr, untagged_ptr); + max_addr = max(max_addr, untagged_ptr + size); link = &object_tree_root.rb_node; rb_parent = NULL; while (*link) { @@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end, unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); unsigned long *end = _end - (BYTES_PER_POINTER - 1); unsigned long flags; + unsigned long untagged_ptr; read_lock_irqsave(&kmemleak_lock, flags); for (ptr = start; ptr < end; ptr++) { @@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end, pointer = *ptr; kasan_enable_current(); - if (pointer < min_addr || pointer >= max_addr) + untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer); + if (untagged_ptr < min_addr || untagged_ptr >= max_addr) continue; /* diff --git a/mm/maccess.c b/mm/maccess.c index f3416632e5a4..ec00be51a24f 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) set_fs(KERNEL_DS); pagefault_disable(); - current->kernel_uaccess_faults_ok++; ret = __copy_from_user_inatomic(dst, (__force const void __user *)src, size); - current->kernel_uaccess_faults_ok--; pagefault_enable(); set_fs(old_fs); @@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) set_fs(KERNEL_DS); pagefault_disable(); - current->kernel_uaccess_faults_ok++; ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - current->kernel_uaccess_faults_ok--; pagefault_enable(); set_fs(old_fs); @@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) set_fs(KERNEL_DS); pagefault_disable(); - current->kernel_uaccess_faults_ok++; do { ret = __get_user(*dst++, (const char __user __force *)src++); } while (dst[-1] && ret == 0 && src - unsafe_addr < count); - current->kernel_uaccess_faults_ok--; dst[-1] = '\0'; pagefault_enable(); set_fs(old_fs); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 124e794867c5..1ad28323fb9f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page) return PageBuddy(page) && page_order(page) >= pageblock_order; } -/* Return the start of the next active pageblock after a given page */ -static struct page *next_active_pageblock(struct page *page) +/* Return the pfn of the start of the next active pageblock after a given pfn */ +static unsigned long next_active_pageblock(unsigned long pfn) { + struct page *page = pfn_to_page(pfn); + /* Ensure the starting page is pageblock-aligned */ - BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); + BUG_ON(pfn & (pageblock_nr_pages - 1)); /* If the entire pageblock is free, move to the end of free page */ if (pageblock_free(page)) { @@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page) /* be careful. 
we don't have locks, page_order can be changed.*/ order = page_order(page); if ((order < MAX_ORDER) && (order >= pageblock_order)) - return page + (1 << order); + return pfn + (1 << order); } - return page + pageblock_nr_pages; + return pfn + pageblock_nr_pages; } -static bool is_pageblock_removable_nolock(struct page *page) +static bool is_pageblock_removable_nolock(unsigned long pfn) { + struct page *page = pfn_to_page(pfn); struct zone *zone; - unsigned long pfn; /* * We have to be careful here because we are iterating over memory @@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page) /* Checks if this range of memory is likely to be hot-removable. */ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) { - struct page *page = pfn_to_page(start_pfn); - unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page))); - struct page *end_page = pfn_to_page(end_pfn); + unsigned long end_pfn, pfn; + + end_pfn = min(start_pfn + nr_pages, + zone_end_pfn(page_zone(pfn_to_page(start_pfn)))); /* Check the starting page of each pageblock within the range */ - for (; page < end_page; page = next_active_pageblock(page)) { - if (!is_pageblock_removable_nolock(page)) + for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) { + if (!is_pageblock_removable_nolock(pfn)) return false; cond_resched(); } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d4496d9d34f5..ee2bce59d2bf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; - const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); + unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); if (copy > nbytes) { if (copy > PAGE_SIZE) @@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy, int uninitialized_var(pval); nodemask_t nodes; - if (nmask != NULL && maxnode < MAX_NUMNODES) + if (nmask != NULL && maxnode < nr_node_ids) return -EINVAL; err = do_get_mempolicy(&pval, &nodes, addr, flags); @@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); - nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); + nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) diff --git a/mm/migrate.c b/mm/migrate.c index d4fd680be3b0..181f5d2718a9 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, lock_page(hpage); } + /* + * Check for pages which are in the process of being freed. Without + * page_mapping() set, hugetlbfs specific move page routine will not + * be called and we could leak usage counts for subpools. 
+ */ + if (page_private(hpage) && !page_mapping(hpage)) { + rc = -EBUSY; + goto out_unlock; + } + if (PageAnon(hpage)) anon_vma = page_get_anon_vma(hpage); @@ -1345,6 +1355,7 @@ put_anon: put_new_page = NULL; } +out_unlock: unlock_page(hpage); out: if (rc != -EAGAIN) diff --git a/mm/mmap.c b/mm/mmap.c index f901065c4c64..fc1809b1bed6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2426,12 +2426,11 @@ int expand_downwards(struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; - int error; + int error = 0; address &= PAGE_MASK; - error = security_mmap_addr(address); - if (error) - return error; + if (address < mmap_min_addr) + return -EPERM; /* Enforce stack_guard_gap */ prev = vma->vm_prev; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 46285d28e43b..0b9f577b1a2a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone) max_boost = mult_frac(zone->_watermark[WMARK_HIGH], watermark_boost_factor, 10000); + + /* + * high watermark may be uninitialised if fragmentation occurs + * very early in boot so do not boost. We do not fall + * through and boost by pageblock_nr_pages as failing + * allocations that early means that reclaim is not going + * to help and it may even be impossible to reclaim the + * boosted watermark resulting in a hang. + */ + if (!max_boost) + return; + max_boost = max(pageblock_nr_pages, max_boost); zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, @@ -4675,11 +4687,11 @@ refill: /* Even if we own the page, we do not use atomic_set(). * This would break get_page_unless_zero() users. */ - page_ref_add(page, size); + page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); /* reset page count bias and offset to start of new frag */ nc->pfmemalloc = page_is_pfmemalloc(page); - nc->pagecnt_bias = size + 1; + nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; nc->offset = size; } @@ -4695,10 +4707,10 @@ refill: size = nc->size; #endif /* OK, page count is 0, we can safely set it */ - set_page_count(page, size + 1); + set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); /* reset page count bias and offset to start of new frag */ - nc->pagecnt_bias = size + 1; + nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; offset = size - fragsz; } diff --git a/mm/shmem.c b/mm/shmem.c index 6ece1e2fe76e..2c012eee133d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2848,16 +2848,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); - int ret; + int ret = 0; /* * No ordinary (disk based) filesystem counts links as inodes; * but each new link needs a new dentry, pinning lowmem, and * tmpfs dentries cannot be pruned until they are unlinked. + * But if an O_TMPFILE file is linked into the tmpfs, the + * first link must skip that, to get the accounting right. 
*/ - ret = shmem_reserve_inode(inode->i_sb); - if (ret) - goto out; + if (inode->i_nlink) { + ret = shmem_reserve_inode(inode->i_sb); + if (ret) + goto out; + } dir->i_size += BOGO_DIRENT_SIZE; inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); diff --git a/mm/slab.c b/mm/slab.c index 78eb8c5bf4e4..91c1863df93d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, void *freelist; void *addr = page_address(page); - page->s_mem = kasan_reset_tag(addr) + colour_off; + page->s_mem = addr + colour_off; page->active = 0; if (OBJFREELIST_SLAB(cachep)) @@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, /* Slab management obj is off-slab. */ freelist = kmem_cache_alloc_node(cachep->freelist_cache, local_flags, nodeid); + freelist = kasan_reset_tag(freelist); if (!freelist) return NULL; } else { @@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, offset *= cachep->colour_off; + /* + * Call kasan_poison_slab() before calling alloc_slabmgmt(), so + * page_address() in the latter returns a non-tagged pointer, + * as it should be for slab pages. + */ + kasan_poison_slab(page); + /* Get slab management. */ freelist = alloc_slabmgmt(cachep, page, offset, local_flags & ~GFP_CONSTRAINT_MASK, page_node); @@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, slab_map_pages(cachep, page, freelist); - kasan_poison_slab(page); cache_init_objs(cachep, page); if (gfpflags_allow_blocking(local_flags)) @@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *ret = slab_alloc(cachep, flags, _RET_IP_); - ret = kasan_slab_alloc(cachep, ret, flags); trace_kmem_cache_alloc(_RET_IP_, ret, cachep->object_size, cachep->size, flags); @@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); - ret = kasan_slab_alloc(cachep, ret, flags); trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep->object_size, cachep->size, flags, nodeid); @@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, unsigned int objnr; unsigned long offset; + ptr = kasan_reset_tag(ptr); + /* Find and validate object. */ cachep = page->slab_cache; objnr = obj_to_index(cachep, page, (void *)ptr); diff --git a/mm/slab.h b/mm/slab.h index 4190c24ef0e9..384105318779 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, flags &= gfp_allowed_mask; for (i = 0; i < size; i++) { - void *object = p[i]; - - kmemleak_alloc_recursive(object, s->object_size, 1, + p[i] = kasan_slab_alloc(s, p[i], flags); + /* As p[i] might get tagged, call kmemleak hook after KASAN. */ + kmemleak_alloc_recursive(p[i], s->object_size, 1, s->flags, flags); - p[i] = kasan_slab_alloc(s, object, flags); } if (memcg_kmem_enabled()) diff --git a/mm/slab_common.c b/mm/slab_common.c index 81732d05e74a..f9d89c1b5977 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) flags |= __GFP_COMP; page = alloc_pages(flags, order); ret = page ? page_address(page) : NULL; - kmemleak_alloc(ret, size, 1, flags); ret = kasan_kmalloc_large(ret, size, flags); + /* As ret might get tagged, call kmemleak hook after KASAN. 
*/ + kmemleak_alloc(ret, size, 1, flags); return ret; } EXPORT_SYMBOL(kmalloc_order); diff --git a/mm/slub.c b/mm/slub.c index 1e3d0ec4e200..dc777761b6b7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, unsigned long ptr_addr) { #ifdef CONFIG_SLAB_FREELIST_HARDENED - return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr); + /* + * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged. + * Normally, this doesn't cause any issues, as both set_freepointer() + * and get_freepointer() are called with a pointer with the same tag. + * However, there are some issues with CONFIG_SLUB_DEBUG code. For + * example, when __free_slub() iterates over objects in a cache, it + * passes untagged pointers to check_object(). check_object() in turn + * calls get_freepointer() with an untagged pointer, which causes the + * freepointer to be restored incorrectly. + */ + return (void *)((unsigned long)ptr ^ s->random ^ + (unsigned long)kasan_reset_tag((void *)ptr_addr)); #else return ptr; #endif @@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) __p < (__addr) + (__objects) * (__s)->size; \ __p += (__s)->size) -#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \ - for (__p = fixup_red_left(__s, __addr), __idx = 1; \ - __idx <= __objects; \ - __p += (__s)->size, __idx++) - /* Determine object index from a given position */ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) { - return (p - addr) / s->size; + return (kasan_reset_tag(p) - addr) / s->size; } static inline unsigned int order_objects(unsigned int order, unsigned int size) @@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s, return 1; base = page_address(page); + object = kasan_reset_tag(object); object = restore_red_left(s, object); if (object < base || object >= base + page->objects * s->size || (object - base) % s->size) { @@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, init_tracking(s, object); } +static void setup_page_debug(struct kmem_cache *s, void *addr, int order) +{ + if (!(s->flags & SLAB_POISON)) + return; + + metadata_access_enable(); + memset(addr, POISON_INUSE, PAGE_SIZE << order); + metadata_access_disable(); +} + static inline int alloc_consistency_checks(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) @@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, #else /* !CONFIG_SLUB_DEBUG */ static inline void setup_object_debug(struct kmem_cache *s, struct page *page, void *object) {} +static inline void setup_page_debug(struct kmem_cache *s, + void *addr, int order) {} static inline int alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr) { return 0; } @@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, */ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) { + ptr = kasan_kmalloc_large(ptr, size, flags); + /* As ptr might get tagged, call kmemleak hook after KASAN. 
*/ kmemleak_alloc(ptr, size, 1, flags); - return kasan_kmalloc_large(ptr, size, flags); + return ptr; } static __always_inline void kfree_hook(void *x) @@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) if (page_is_pfmemalloc(page)) SetPageSlabPfmemalloc(page); + kasan_poison_slab(page); + start = page_address(page); - if (unlikely(s->flags & SLAB_POISON)) - memset(start, POISON_INUSE, PAGE_SIZE << order); - - kasan_poison_slab(page); + setup_page_debug(s, start, order); shuffle = shuffle_freelist(s, page); if (!shuffle) { - for_each_object_idx(p, idx, s, start, page->objects) { - if (likely(idx < page->objects)) { - next = p + s->size; - next = setup_object(s, page, next); - set_freepointer(s, p, next); - } else - set_freepointer(s, p, NULL); - } start = fixup_red_left(s, start); start = setup_object(s, page, start); page->freelist = start; + for (idx = 0, p = start; idx < page->objects - 1; idx++) { + next = p + s->size; + next = setup_object(s, page, next); + set_freepointer(s, p, next); + p = next; + } + set_freepointer(s, p, NULL); } page->inuse = page->objects; diff --git a/mm/swap.c b/mm/swap.c index 4929bc1be60e..4d7d37eb3c40 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu) { } -static bool need_activate_page_drain(int cpu) -{ - return false; -} - void activate_page(struct page *page) { struct zone *zone = page_zone(page); @@ -653,13 +648,15 @@ void lru_add_drain(void) put_cpu(); } +#ifdef CONFIG_SMP + +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); + static void lru_add_drain_per_cpu(struct work_struct *dummy) { lru_add_drain(); } -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); - /* * Doesn't need any cpu hotplug locking because we do rely on per-cpu * kworkers being shut down before our page_alloc_cpu_dead callback is @@ -702,6 +699,12 @@ void lru_add_drain_all(void) mutex_unlock(&lock); } +#else +void lru_add_drain_all(void) +{ + lru_add_drain(); +} +#endif /** * release_pages - batched put_page() diff --git a/mm/util.c b/mm/util.c index 1ea055138043..379319b1bcfd 100644 --- a/mm/util.c +++ b/mm/util.c @@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len) { void *p; - p = kmalloc_track_caller(len, GFP_USER); + p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index fa2644d276ef..e31e1b20f7f4 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -13,27 +13,13 @@ #include <net/sock.h> #include <net/tcp.h> -static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) -{ - u32 ret; - - preempt_disable(); - rcu_read_lock(); - bpf_cgroup_storage_set(storage); - ret = BPF_PROG_RUN(prog, ctx); - rcu_read_unlock(); - preempt_enable(); - - return ret; -} - -static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, - u32 *time) +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, + u32 *retval, u32 *time) { struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; enum bpf_cgroup_storage_type stype; u64 time_start, time_spent = 0; + int ret = 0; u32 i; for_each_cgroup_storage_type(stype) { @@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, if (!repeat) repeat = 1; + + rcu_read_lock(); + preempt_disable(); time_start = ktime_get_ns(); for (i = 0; i < 
repeat; i++) { - *ret = bpf_test_run_one(prog, ctx, storage); + bpf_cgroup_storage_set(storage); + *retval = BPF_PROG_RUN(prog, ctx); + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + if (need_resched()) { - if (signal_pending(current)) - break; time_spent += ktime_get_ns() - time_start; + preempt_enable(); + rcu_read_unlock(); + cond_resched(); + + rcu_read_lock(); + preempt_disable(); time_start = ktime_get_ns(); } } time_spent += ktime_get_ns() - time_start; + preempt_enable(); + rcu_read_unlock(); + do_div(time_spent, repeat); *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; for_each_cgroup_storage_type(stype) bpf_cgroup_storage_free(storage[stype]); - return 0; + return ret; } static int bpf_test_finish(const union bpf_attr *kattr, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 3aeff0895669..ac92b2eb32b1 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br, return; br_multicast_update_query_timer(br, query, max_delay); - - /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules, - * the arrival port for IGMP Queries where the source address - * is 0.0.0.0 should not be added to router port list. - */ - if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) || - saddr->proto == htons(ETH_P_IPV6)) - br_multicast_mark_router(br, port); + br_multicast_mark_router(br, port); } static void br_ip4_multicast_query(struct net_bridge *br, diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 3661cdd927f1..7e71b0df1fbc 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con) dout("process_connect on %p tag %d\n", con, (int)con->in_tag); if (con->auth) { + int len = le32_to_cpu(con->in_reply.authorizer_len); + /* * Any connection that defines ->get_authorizer() * should also define ->add_authorizer_challenge() and @@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con) */ if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { ret = con->ops->add_authorizer_challenge( - con, con->auth->authorizer_reply_buf, - le32_to_cpu(con->in_reply.authorizer_len)); + con, con->auth->authorizer_reply_buf, len); if (ret < 0) return ret; @@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con) return 0; } - ret = con->ops->verify_authorizer_reply(con); - if (ret < 0) { - con->error_msg = "bad authorize reply"; - return ret; + if (len) { + ret = con->ops->verify_authorizer_reply(con); + if (ret < 0) { + con->error_msg = "bad authorize reply"; + return ret; + } } } diff --git a/net/compat.c b/net/compat.c index 959d1c51826d..3d348198004f 100644 --- a/net/compat.c +++ b/net/compat.c @@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname, char __user *optval, unsigned int optlen) { int err; - struct socket *sock = sockfd_lookup(fd, &err); + struct socket *sock; + + if (optlen > INT_MAX) + return -EINVAL; + sock = sockfd_lookup(fd, &err); if (sock) { err = security_socket_setsockopt(sock, level, optname); if (err) { diff --git a/net/core/dev.c b/net/core/dev.c index 8e276e0192a1..5d03889502eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, netdev_features_t feature; int feature_bit; - for_each_netdev_feature(&upper_disables, feature_bit) { + for_each_netdev_feature(upper_disables, feature_bit) { 
feature = __NETIF_F_BIT(feature_bit); if (!(upper->wanted_features & feature) && (features & feature)) { @@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper, netdev_features_t feature; int feature_bit; - for_each_netdev_feature(&upper_disables, feature_bit) { + for_each_netdev_feature(upper_disables, feature_bit) { feature = __NETIF_F_BIT(feature_bit); if (!(features & feature) && (lower->features & feature)) { netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", diff --git a/net/core/filter.c b/net/core/filter.c index 7a54dc11ac2d..f7d0004fc160 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) u32 off = skb_mac_header_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_cow(skb, len_diff); @@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) u32 off = skb_mac_header_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_unclone(skb, GFP_ATOMIC); @@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_cow(skb, len_diff); @@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); int ret; - /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. 
*/ - if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) + if (!skb_is_gso_tcp(skb)) return -ENOTSUPP; ret = skb_unclone(skb, GFP_ATOMIC); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26d848484912..2415d9cb9b89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) */ void *netdev_alloc_frag(unsigned int fragsz) { + fragsz = SKB_DATA_ALIGN(fragsz); + return __netdev_alloc_frag(fragsz, GFP_ATOMIC); } EXPORT_SYMBOL(netdev_alloc_frag); @@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) void *napi_alloc_frag(unsigned int fragsz) { + fragsz = SKB_DATA_ALIGN(fragsz); + return __napi_alloc_frag(fragsz, GFP_ATOMIC); } EXPORT_SYMBOL(napi_alloc_frag); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index a1917025e155..410f19148106 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -612,8 +612,8 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds, { struct device_node *ports, *port; struct dsa_port *dp; + int err = 0; u32 reg; - int err; ports = of_get_child_by_name(dn, "ports"); if (!ports) { @@ -624,19 +624,23 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds, for_each_available_child_of_node(ports, port) { err = of_property_read_u32(port, "reg", ®); if (err) - return err; + goto out_put_node; - if (reg >= ds->num_ports) - return -EINVAL; + if (reg >= ds->num_ports) { + err = -EINVAL; + goto out_put_node; + } dp = &ds->ports[reg]; err = dsa_port_parse_of(dp, port); if (err) - return err; + goto out_put_node; } - return 0; +out_put_node: + of_node_put(ports); + return err; } static int dsa_switch_parse_member_of(struct dsa_switch *ds, diff --git a/net/dsa/port.c b/net/dsa/port.c index 2d7e01b23572..c2261697ee83 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state) int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) { - u8 stp_state = dp->bridge_dev ? 
BR_STATE_BLOCKING : BR_STATE_FORWARDING; struct dsa_switch *ds = dp->ds; int port = dp->index; int err; @@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) return err; } - dsa_port_set_state_now(dp, stp_state); + if (!dp->bridge_dev) + dsa_port_set_state_now(dp, BR_STATE_FORWARDING); return 0; } @@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy) struct dsa_switch *ds = dp->ds; int port = dp->index; - dsa_port_set_state_now(dp, BR_STATE_DISABLED); + if (!dp->bridge_dev) + dsa_port_set_state_now(dp, BR_STATE_DISABLED); if (ds->ops->port_disable) ds->ops->port_disable(ds, port, phy); @@ -291,6 +292,7 @@ static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp) return ERR_PTR(-EPROBE_DEFER); } + of_node_put(phy_dn); return phydev; } diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 777fa3b7fb13..f0165c5f376b 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: - if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) + if ((level < doi_def->map.std->lvl.cipso_size) && + (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)) return 0; break; } @@ -1735,13 +1736,26 @@ validate_return: */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { + unsigned char optbuf[sizeof(struct ip_options) + 40]; + struct ip_options *opt = (struct ip_options *)optbuf; + if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; + /* + * We might be called above the IP layer, + * so we can not use icmp_send and IPCB here. + */ + + memset(opt, 0, sizeof(struct ip_options)); + opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr); + if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL)) + return; + if (gateway) - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); + __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt); else - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); + __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt); } /** diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 5459f41fc26f..10e809b296ec 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * skb->len += tailen; skb->data_len += tailen; skb->truesize += tailen; - if (sk) + if (sk && sk_fullsock(sk)) refcount_add(tailen, &sk->sk_wmem_alloc); goto out; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index fe4f6a624238..ed14ec245584 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -710,6 +710,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, case RTA_GATEWAY: cfg->fc_gw = nla_get_be32(attr); break; + case RTA_VIA: + NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute"); + err = -EINVAL; + goto errout; case RTA_PRIORITY: cfg->fc_priority = nla_get_u32(attr); break; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 065997f414e6..3f24414150e2 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -570,7 +570,8 @@ relookup_failed: * MUST reply to only the first fragment. 
*/ -void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) +void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + const struct ip_options *opt) { struct iphdr *iph; int room; @@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) iph->tos; mark = IP4_REPLY_MARK(net, skb_in->mark); - if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in)) + if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt)) goto out_unlock; @@ -742,7 +743,7 @@ out_bh_enable: local_bh_enable(); out:; } -EXPORT_SYMBOL(icmp_send); +EXPORT_SYMBOL(__icmp_send); static void icmp_socket_deliver(struct sk_buff *skb, u32 info) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 3978f807fa8b..6ae89f2b541b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1457,9 +1457,23 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel_parm *p = &t->parms; __be16 o_flags = p->o_flags; - if ((t->erspan_ver == 1 || t->erspan_ver == 2) && - !t->collect_md) - o_flags |= TUNNEL_KEY; + if (t->erspan_ver == 1 || t->erspan_ver == 2) { + if (!t->collect_md) + o_flags |= TUNNEL_KEY; + + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) + goto nla_put_failure; + + if (t->erspan_ver == 1) { + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) + goto nla_put_failure; + } else { + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) + goto nla_put_failure; + } + } if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, @@ -1495,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) - goto nla_put_failure; - - if (t->erspan_ver == 1) { - if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) - goto nla_put_failure; - } else if (t->erspan_ver == 2) { - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) - goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) - goto nla_put_failure; - } - return 0; nla_put_failure: diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 51d8efba6de2..1f4737b77067 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -307,11 +307,10 @@ drop: } static int ip_rcv_finish_core(struct net *net, struct sock *sk, - struct sk_buff *skb) + struct sk_buff *skb, struct net_device *dev) { const struct iphdr *iph = ip_hdr(skb); int (*edemux)(struct sk_buff *skb); - struct net_device *dev = skb->dev; struct rtable *rt; int err; @@ -400,6 +399,7 @@ drop_error: static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { + struct net_device *dev = skb->dev; int ret; /* if ingress device is enslaved to an L3 master device pass the @@ -409,7 +409,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) if (!skb) return NET_RX_SUCCESS; - ret = ip_rcv_finish_core(net, sk, skb); + ret = ip_rcv_finish_core(net, sk, skb, dev); if (ret != NET_RX_DROP) ret = dst_input(skb); return ret; @@ -545,6 +545,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { + struct net_device *dev = skb->dev; struct dst_entry *dst; skb_list_del_init(skb); @@ -554,7 +555,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, skb = l3mdev_ip_rcv(skb); if (!skb) continue; - if (ip_rcv_finish_core(net, 
sk, skb) == NET_RX_DROP) + if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP) continue; dst = skb_dst(skb); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index ed194d46c00e..32a35043c9f5 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb) * If opt == NULL, then skb->data should point to IP header. */ -int ip_options_compile(struct net *net, - struct ip_options *opt, struct sk_buff *skb) +int __ip_options_compile(struct net *net, + struct ip_options *opt, struct sk_buff *skb, + __be32 *info) { __be32 spec_dst = htonl(INADDR_ANY); unsigned char *pp_ptr = NULL; @@ -468,11 +469,22 @@ eol: return 0; error: - if (skb) { - icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24)); - } + if (info) + *info = htonl((pp_ptr-iph)<<24); return -EINVAL; } + +int ip_options_compile(struct net *net, + struct ip_options *opt, struct sk_buff *skb) +{ + int ret; + __be32 info; + + ret = __ip_options_compile(net, opt, skb, &info); + if (ret != 0 && skb) + icmp_send(skb, ICMP_PARAMETERPROB, 0, info); + return ret; +} EXPORT_SYMBOL(ip_options_compile); /* diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c index f86bb4f06609..d8e3a1fb8e82 100644 --- a/net/ipv4/netlink.c +++ b/net/ipv4/netlink.c @@ -3,9 +3,10 @@ #include <linux/types.h> #include <net/net_namespace.h> #include <net/netlink.h> +#include <linux/in6.h> #include <net/ip.h> -int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, +int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family, struct netlink_ext_ack *extack) { *ip_proto = nla_get_u8(attr); @@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, switch (*ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: + return 0; case IPPROTO_ICMP: + if (family != AF_INET) + break; + return 0; +#if IS_ENABLED(CONFIG_IPV6) + case IPPROTO_ICMPV6: + if (family != AF_INET6) + break; return 0; - default: - NL_SET_ERR_MSG(extack, "Unsupported ip proto"); - return -EOPNOTSUPP; +#endif } + NL_SET_ERR_MSG(extack, "Unsupported ip proto"); + return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5163b64f8fb3..7bb9128c8363 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2803,7 +2803,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (tb[RTA_IP_PROTO]) { err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], - &ip_proto, extack); + &ip_proto, AF_INET, extack); if (err) return err; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2079145a3b7c..cf3c5095c10e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk) sk_mem_reclaim(sk); tcp_clear_all_retrans_hints(tcp_sk(sk)); tcp_sk(sk)->packets_out = 0; + inet_csk(sk)->icsk_backoff = 0; } int tcp_disconnect(struct sock *sk, int flags) @@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags) tp->write_seq += tp->max_window + 2; if (tp->write_seq == 0) tp->write_seq = 1; - icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index efc6fef692ff..ec3cea9d6828 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (sock_owned_by_user(sk)) break; + skb = tcp_rtx_queue_head(sk); + if (WARN_ON_ONCE(!skb)) + break; + 
icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT; icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); - skb = tcp_rtx_queue_head(sk); tcp_mstamp_refresh(tp); delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 730bc44dbad9..ccc78f3a4b60 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); + tcp_init_tso_segs(skb, mss_now); goto repair; /* Skip network transmission */ } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5c3cd5d84a6f..372fdc5381a9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info) for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { int (*handler)(struct sk_buff *skb, u32 info); + const struct ip_tunnel_encap_ops *encap; - if (!iptun_encaps[i]) + encap = rcu_dereference(iptun_encaps[i]); + if (!encap) continue; - handler = rcu_dereference(iptun_encaps[i]->err_handler); + handler = encap->err_handler; if (handler && !handler(skb, info)) return 0; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 5afe9f83374d..239d4a65ad6e 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info skb->len += tailen; skb->data_len += tailen; skb->truesize += tailen; - if (sk) + if (sk && sk_fullsock(sk)) refcount_add(tailen, &sk->sk_wmem_alloc); goto out; diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index b858bd5280bf..867474abe269 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c @@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, static int gue6_err_proto_handler(int proto, struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, u32 info) + u8 type, u8 code, int offset, __be32 info) { const struct inet6_protocol *ipprot; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 801a9a0c217e..26f25b6e2833 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1719,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static void ip6erspan_set_version(struct nlattr *data[], + struct __ip6_tnl_parm *parms) +{ + if (!data) + return; + + parms->erspan_ver = 1; + if (data[IFLA_GRE_ERSPAN_VER]) + parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); + + if (parms->erspan_ver == 1) { + if (data[IFLA_GRE_ERSPAN_INDEX]) + parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); + } else if (parms->erspan_ver == 2) { + if (data[IFLA_GRE_ERSPAN_DIR]) + parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); + if (data[IFLA_GRE_ERSPAN_HWID]) + parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); + } +} + static void ip6gre_netlink_parms(struct nlattr *data[], struct __ip6_tnl_parm *parms) { @@ -1767,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], if (data[IFLA_GRE_COLLECT_METADATA]) parms->collect_md = true; - - parms->erspan_ver = 1; - if (data[IFLA_GRE_ERSPAN_VER]) - parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); - - if (parms->erspan_ver == 1) { - if (data[IFLA_GRE_ERSPAN_INDEX]) - parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); - 
} else if (parms->erspan_ver == 2) { - if (data[IFLA_GRE_ERSPAN_DIR]) - parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); - if (data[IFLA_GRE_ERSPAN_HWID]) - parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); - } } static int ip6gre_tap_init(struct net_device *dev) @@ -2100,9 +2107,23 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct __ip6_tnl_parm *p = &t->parms; __be16 o_flags = p->o_flags; - if ((p->erspan_ver == 1 || p->erspan_ver == 2) && - !p->collect_md) - o_flags |= TUNNEL_KEY; + if (p->erspan_ver == 1 || p->erspan_ver == 2) { + if (!p->collect_md) + o_flags |= TUNNEL_KEY; + + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) + goto nla_put_failure; + + if (p->erspan_ver == 1) { + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) + goto nla_put_failure; + } else { + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) + goto nla_put_failure; + } + } if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, @@ -2117,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || - nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) || - nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) + nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, @@ -2136,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) - goto nla_put_failure; - - if (p->erspan_ver == 1) { - if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) - goto nla_put_failure; - } else if (p->erspan_ver == 2) { - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) - goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) - goto nla_put_failure; - } - return 0; nla_put_failure: @@ -2203,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, int err; ip6gre_netlink_parms(data, &nt->parms); + ip6erspan_set_version(data, &nt->parms); ign = net_generic(net, ip6gre_net_id); if (nt->parms.collect_md) { @@ -2248,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], if (IS_ERR(t)) return PTR_ERR(t); + ip6erspan_set_version(data, &p); ip6gre_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 964491cf3672..8dad1d690b78 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock); static void rt6_remove_exception(struct rt6_exception_bucket *bucket, struct rt6_exception *rt6_ex) { + struct fib6_info *from; struct net *net; if (!bucket || !rt6_ex) return; net = dev_net(rt6_ex->rt6i->dst.dev); + net->ipv6.rt6_stats->fib_rt_cache--; + + /* purge completely the exception to allow releasing the held resources: + * some [sk] cache may keep the dst around for unlimited time + */ + from = rcu_dereference_protected(rt6_ex->rt6i->from, + lockdep_is_held(&rt6_exception_lock)); + rcu_assign_pointer(rt6_ex->rt6i->from, NULL); + fib6_info_release(from); + dst_dev_put(&rt6_ex->rt6i->dst); + hlist_del_rcu(&rt6_ex->hlist); dst_release(&rt6_ex->rt6i->dst); kfree_rcu(rt6_ex, rcu); 
WARN_ON_ONCE(!bucket->depth); bucket->depth--; - net->ipv6.rt6_stats->fib_rt_cache--; } /* Remove oldest rt6_ex in bucket and free the memory @@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt) static void rt6_update_exception_stamp_rt(struct rt6_info *rt) { struct rt6_exception_bucket *bucket; - struct fib6_info *from = rt->from; struct in6_addr *src_key = NULL; struct rt6_exception *rt6_ex; - - if (!from || - !(rt->rt6i_flags & RTF_CACHE)) - return; + struct fib6_info *from; rcu_read_lock(); + from = rcu_dereference(rt->from); + if (!from || !(rt->rt6i_flags & RTF_CACHE)) + goto unlock; + bucket = rcu_dereference(from->rt6i_exception_bucket); #ifdef CONFIG_IPV6_SUBTREES @@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt) if (rt6_ex) rt6_ex->stamp = jiffies; +unlock: rcu_read_unlock(); } @@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net, u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; const struct in6_addr *gw_addr = &cfg->fc_gateway; u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; + struct fib6_info *from; struct rt6_info *grt; int err; err = 0; grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); if (grt) { + rcu_read_lock(); + from = rcu_dereference(grt->from); if (!grt->dst.error && /* ignore match if it is the default route */ - grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) && + from && !ipv6_addr_any(&from->fib6_dst.addr) && (grt->rt6i_flags & flags || dev != grt->dst.dev)) { NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway or device mismatch"); err = -EINVAL; } + rcu_read_unlock(); ip6_rt_put(grt); } @@ -4166,6 +4182,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); cfg->fc_flags |= RTF_GATEWAY; } + if (tb[RTA_VIA]) { + NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute"); + goto errout; + } if (tb[RTA_DST]) { int plen = (rtm->rtm_dst_len + 7) >> 3; @@ -4649,7 +4669,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, table = rt->fib6_table->tb6_id; else table = RT6_TABLE_UNSPEC; - rtm->rtm_table = table; + rtm->rtm_table = table < 256 ? 
table : RT_TABLE_COMPAT; if (nla_put_u32(skb, RTA_TABLE, table)) goto nla_put_failure; @@ -4873,7 +4893,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (tb[RTA_IP_PROTO]) { err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO], - &fl6.flowi6_proto, extack); + &fl6.flowi6_proto, AF_INET6, + extack); if (err) goto errout; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e8a1dabef803..09e440e8dfae 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1873,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net) err_reg_dev: ipip6_dev_free(sitn->fb_tunnel_dev); + free_netdev(sitn->fb_tunnel_dev); err_alloc_dev: return err; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2596ffdeebea..b444483cdb2b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int peeked, peeking, off; int err; int is_udplite = IS_UDPLITE(sk); + struct udp_mib __percpu *mib; bool checksum_valid = false; - struct udp_mib *mib; int is_udp4; if (flags & MSG_ERRQUEUE) @@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable); */ static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, u32 info) + u8 type, u8 code, int offset, __be32 info) { int i; for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, - u8 type, u8 code, int offset, u32 info); + u8 type, u8 code, int offset, __be32 info); + const struct ip6_tnl_encap_ops *encap; - if (!ip6tun_encaps[i]) + encap = rcu_dereference(ip6tun_encaps[i]); + if (!encap) continue; - handler = rcu_dereference(ip6tun_encaps[i]->err_handler); + handler = encap->err_handler; if (handler && !handler(skb, opt, type, code, offset, info)) return 0; } diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index f5b4febeaa25..bc65db782bfb 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); xfrm_flush_gc(); + xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); diff --git a/net/key/af_key.c b/net/key/af_key.c index 655c787f9d54..5651c29cb5bd 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock) return 0; } -static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, - gfp_t allocation, struct sock *sk) +static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation, + struct sock *sk) { int err = -ENOBUFS; - sock_hold(sk); - if (*skb2 == NULL) { - if (refcount_read(&skb->users) != 1) { - *skb2 = skb_clone(skb, allocation); - } else { - *skb2 = skb; - refcount_inc(&skb->users); - } - } - if (*skb2 != NULL) { - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { - skb_set_owner_r(*skb2, sk); - skb_queue_tail(&sk->sk_receive_queue, *skb2); - sk->sk_data_ready(sk); - *skb2 = NULL; - err = 0; - } + if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) + return err; + + skb = skb_clone(skb, allocation); + + if (skb) { + skb_set_owner_r(skb, sk); + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk); + err = 0; } - sock_put(sk); return err; } @@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, 
{ struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); struct sock *sk; - struct sk_buff *skb2 = NULL; int err = -ESRCH; /* XXX Do we need something like netlink_overrun? I think @@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, * socket. */ if (pfk->promisc) - pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); + pfkey_broadcast_one(skb, GFP_ATOMIC, sk); /* the exact target will be processed later */ if (sk == one_sk) @@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, continue; } - err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); + err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk); /* Error is cleared after successful sending to at least one * registered KM */ @@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, rcu_read_unlock(); if (one_sk != NULL) - err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); + err = pfkey_broadcast_one(skb, allocation, one_sk); - kfree_skb(skb2); kfree_skb(skb); return err; } @@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m if (proto == 0) return -EINVAL; - err = xfrm_state_flush(net, proto, true); + err = xfrm_state_flush(net, proto, true, false); err2 = unicast_flush_resp(sk, hdr); if (err || err2) { if (err == -ESRCH) /* empty table - go quietly */ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2493c74c2d37..96496b2c1670 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER; int err; + int prev_beacon_int; old = sdata_dereference(sdata->u.ap.beacon, sdata); if (old) @@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->needed_rx_chains = sdata->local->rx_chains; + prev_beacon_int = sdata->vif.bss_conf.beacon_int; sdata->vif.bss_conf.beacon_int = params->beacon_interval; if (params->he_cap) @@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, if (!err) ieee80211_vif_copy_chanctx_to_vlans(sdata, false); mutex_unlock(&local->mtx); - if (err) + if (err) { + sdata->vif.bss_conf.beacon_int = prev_beacon_int; return err; + } /* * Apply control port protocol, this allows us to diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 87a729926734..977dea436ee8 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, * We need a bit of data queued to build aggregates properly, so * instruct the TCP stack to allow more than a single ms of data * to be queued in the stack. The value is a bit-shift of 1 - * second, so 8 is ~4ms of queued data. Only affects local TCP + * second, so 7 is ~8ms of queued data. Only affects local TCP * sockets. * This is the default, anyhow - drivers may need to override it * for local reasons (longer buffers, longer completion time, or * similar). */ - local->hw.tx_sk_pacing_shift = 8; + local->hw.tx_sk_pacing_shift = 7; /* set up some defaults */ local->hw.queues = 1; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index cad6592c52a1..2ec7011a4d07 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -70,6 +70,7 @@ enum mesh_deferred_task_flags { * @dst: mesh path destination mac address * @mpp: mesh proxy mac address * @rhash: rhashtable list pointer + * @walk_list: linked list containing all mesh_path objects. 
* @gate_list: list pointer for known gates list * @sdata: mesh subif * @next_hop: mesh neighbor to which frames for this destination will be @@ -105,6 +106,7 @@ struct mesh_path { u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ struct rhash_head rhash; + struct hlist_node walk_list; struct hlist_node gate_list; struct ieee80211_sub_if_data *sdata; struct sta_info __rcu *next_hop; @@ -133,12 +135,16 @@ struct mesh_path { * gate's mpath may or may not be resolved and active. * @gates_lock: protects updates to known_gates * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr + * @walk_head: linked list containging all mesh_path objects + * @walk_lock: lock protecting walk_head * @entries: number of entries in the table */ struct mesh_table { struct hlist_head known_gates; spinlock_t gates_lock; struct rhashtable rhead; + struct hlist_head walk_head; + spinlock_t walk_lock; atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ }; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a5125624a76d..88a6d5e18ccc 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void) return NULL; INIT_HLIST_HEAD(&newtbl->known_gates); + INIT_HLIST_HEAD(&newtbl->walk_head); atomic_set(&newtbl->entries, 0); spin_lock_init(&newtbl->gates_lock); + spin_lock_init(&newtbl->walk_lock); return newtbl; } @@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) static struct mesh_path * __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) { - int i = 0, ret; - struct mesh_path *mpath = NULL; - struct rhashtable_iter iter; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return NULL; - - rhashtable_walk_start(&iter); + int i = 0; + struct mesh_path *mpath; - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { if (i++ == idx) break; } - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); - if (IS_ERR(mpath) || !mpath) + if (!mpath) return NULL; if (mpath_expired(mpath)) { @@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, return ERR_PTR(-ENOMEM); tbl = sdata->u.mesh.mesh_paths; + spin_lock_bh(&tbl->walk_lock); do { ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, @@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params); - + else if (!ret) + hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); } while (unlikely(ret == -EEXIST && !mpath)); + spin_unlock_bh(&tbl->walk_lock); - if (ret && ret != -EEXIST) - return ERR_PTR(ret); - - /* At this point either new_mpath was added, or we found a - * matching entry already in the table; in the latter case - * free the unnecessary new entry. 
- */ - if (ret == -EEXIST) { + if (ret) { kfree(new_mpath); + + if (ret != -EEXIST) + return ERR_PTR(ret); + new_mpath = mpath; } + sdata->u.mesh.mesh_paths_generation++; return new_mpath; } @@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, memcpy(new_mpath->mpp, mpp, ETH_ALEN); tbl = sdata->u.mesh.mpp_paths; + + spin_lock_bh(&tbl->walk_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, mesh_rht_params); + if (!ret) + hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); + spin_unlock_bh(&tbl->walk_lock); + + if (ret) + kfree(new_mpath); sdata->u.mesh.mpp_paths_generation++; return ret; @@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta) struct mesh_table *tbl = sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + rcu_read_lock(); + hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { @@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta) WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); } } - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + rcu_read_unlock(); } static void mesh_path_free_rcu(struct mesh_table *tbl, @@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) { + hlist_del_rcu(&mpath->walk_list); rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); mesh_path_free_rcu(tbl, mpath); } @@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl = sdata->u.mesh.mesh_paths; struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + struct hlist_node *n; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if (rcu_access_pointer(mpath->next_hop) == sta) __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + spin_unlock_bh(&tbl->walk_lock); } static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, @@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, { struct mesh_table *tbl = sdata->u.mesh.mpp_paths; struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + struct hlist_node *n; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if (ether_addr_equal(mpath->mpp, proxy)) __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + 
spin_unlock_bh(&tbl->walk_lock); } static void table_flush_by_iface(struct mesh_table *tbl) { struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; - - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); - if (ret) - return; - - rhashtable_walk_start(&iter); + struct hlist_node *n; - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + spin_unlock_bh(&tbl->walk_lock); } /** @@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl, { struct mesh_path *mpath; - rcu_read_lock(); + spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); if (!mpath) { - rcu_read_unlock(); + spin_unlock_bh(&tbl->walk_lock); return -ENXIO; } __mesh_path_del(tbl, mpath); - rcu_read_unlock(); + spin_unlock_bh(&tbl->walk_lock); return 0; } @@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) { struct mesh_path *mpath; - struct rhashtable_iter iter; - int ret; + struct hlist_node *n; - ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); - if (ret) - return; - - rhashtable_walk_start(&iter); - - while ((mpath = rhashtable_walk_next(&iter))) { - if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) - continue; - if (IS_ERR(mpath)) - break; + spin_lock_bh(&tbl->walk_lock); + hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) __mesh_path_del(tbl, mpath); } - - rhashtable_walk_stop(&iter); - rhashtable_walk_exit(&iter); + spin_unlock_bh(&tbl->walk_lock); } void mesh_path_expire(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb4d71efb6fb..c2a6da5d80da 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) struct ieee80211_sub_if_data *sdata = rx->sdata; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u16 ac, q, hdrlen; + int tailroom = 0; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); @@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (!ifmsh->mshcfg.dot11MeshForwarding) goto out; + if (sdata->crypto_tx_tailroom_needed_cnt) + tailroom = IEEE80211_ENCRYPT_TAILROOM; + fwd_skb = skb_copy_expand(skb, local->tx_headroom + - sdata->encrypt_headroom, 0, GFP_ATOMIC); + sdata->encrypt_headroom, + tailroom, GFP_ATOMIC); if (!fwd_skb) goto out; diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 7d55d4c04088..fa763e2e50ec 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1838,6 +1838,9 @@ static int rtm_to_route_config(struct sk_buff *skb, goto errout; break; } + case RTA_GATEWAY: + NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute"); + goto errout; case RTA_VIA: { if (nla_get_via(nla, &cfg->rc_via_alen, diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 86afacb07e5f..ac8d848d7624 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, { struct ip_vs_dest *dest; unsigned int 
atype, i; - int ret = 0; EnterFunction(2); #ifdef CONFIG_IP_VS_IPV6 if (udest->af == AF_INET6) { + int ret; + atype = ipv6_addr_type(&udest->addr.in6); if ((!(atype & IPV6_ADDR_UNICAST) || atype & IPV6_ADDR_LINKLOCAL) && diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5a92f23f179f..4893f248dfdc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -313,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) int err; list_for_each_entry(rule, &ctx->chain->rules, list) { + if (!nft_is_active_next(ctx->net, rule)) + continue; + err = nft_delrule(ctx, rule); if (err < 0) return err; diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index ea7c67050792..ee3e5b6471a6 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, (state == 0 && (byte & bitmask) == 0)) return bit_spot; - bit_spot++; + if (++bit_spot >= bitmap_len) + return -1; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 6a196e438b6c..d1fc019e932e 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) sock->service_name, sock->service_name_len, &service_name_tlv_length); + if (!service_name_tlv) { + err = -ENOMEM; + goto error_tlv; + } size += service_name_tlv_length; } @@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); + if (!miux_tlv) { + err = -ENOMEM; + goto error_tlv; + } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); + if (!rw_tlv) { + err = -ENOMEM; + goto error_tlv; + } size += rw_tlv_length; pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); @@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); + if (!miux_tlv) { + err = -ENOMEM; + goto error_tlv; + } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); + if (!rw_tlv) { + err = -ENOMEM; + goto error_tlv; + } size += rw_tlv_length; skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index ef4026a23e80..4fa015208aab 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local) static int nfc_llcp_build_gb(struct nfc_llcp_local *local) { - u8 *gb_cur, *version_tlv, version, version_length; - u8 *lto_tlv, lto_length; - u8 *wks_tlv, wks_length; - u8 *miux_tlv, miux_length; + u8 *gb_cur, version, version_length; + u8 lto_length, wks_length, miux_length; + u8 *version_tlv = NULL, *lto_tlv = NULL, + *wks_tlv = NULL, *miux_tlv = NULL; __be16 wks = cpu_to_be16(local->local_wks); u8 gb_len = 0; int ret = 0; @@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) version = LLCP_VERSION_11; version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, 1, &version_length); + if (!version_tlv) { + ret = -ENOMEM; + goto out; + } gb_len += version_length; lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, <o_length); + if (!lto_tlv) { + ret = -ENOMEM; + goto out; + } gb_len += lto_length; pr_debug("Local wks 0x%lx\n", local->local_wks); wks_tlv = 
nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length); + if (!wks_tlv) { + ret = -ENOMEM; + goto out; + } gb_len += wks_length; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, &miux_length); + if (!miux_tlv) { + ret = -ENOMEM; + goto out; + } gb_len += miux_length; gb_len += ARRAY_SIZE(llcp_magic); diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 9fc76b19cd3c..db3473540303 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code, ph->utid = 0; ph->message_id = id; ph->pipe_handle = pn->pipe_handle; - ph->data[0] = code; + ph->error_code = code; return pn_skb_send(sk, skb, NULL); } @@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code, ph->utid = id; /* whatever */ ph->message_id = id; ph->pipe_handle = pn->pipe_handle; - ph->data[0] = code; + ph->error_code = code; return pn_skb_send(sk, skb, NULL); } @@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, struct pnpipehdr *ph; struct sockaddr_pn dst; u8 data[4] = { - oph->data[0], /* PEP type */ + oph->pep_type, /* PEP type */ code, /* error code, at an unusual offset */ PAD, PAD, }; @@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, ph->utid = oph->utid; ph->message_id = PNS_PEP_CTRL_RESP; ph->pipe_handle = oph->pipe_handle; - ph->data[0] = oph->data[1]; /* CTRL id */ + ph->data0 = oph->data[0]; /* CTRL id */ pn_skb_get_src_sockaddr(oskb, &dst); return pn_skb_send(sk, skb, &dst); @@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) return -EINVAL; hdr = pnp_hdr(skb); - if (hdr->data[0] != PN_PEP_TYPE_COMMON) { + if (hdr->pep_type != PN_PEP_TYPE_COMMON) { net_dbg_ratelimited("Phonet unknown PEP type: %u\n", - (unsigned int)hdr->data[0]); + (unsigned int)hdr->pep_type); return -EOPNOTSUPP; } - switch (hdr->data[1]) { + switch (hdr->data[0]) { case PN_PEP_IND_FLOW_CONTROL: switch (pn->tx_fc) { case PN_LEGACY_FLOW_CONTROL: - switch (hdr->data[4]) { + switch (hdr->data[3]) { case PEP_IND_BUSY: atomic_set(&pn->tx_credits, 0); break; @@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) } break; case PN_ONE_CREDIT_FLOW_CONTROL: - if (hdr->data[4] == PEP_IND_READY) + if (hdr->data[3] == PEP_IND_READY) atomic_set(&pn->tx_credits, wake = 1); break; } @@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) break; - atomic_add(wake = hdr->data[4], &pn->tx_credits); + atomic_add(wake = hdr->data[3], &pn->tx_credits); break; default: net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", - (unsigned int)hdr->data[1]); + (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } if (wake) @@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr = pnp_hdr(skb); - u8 n_sb = hdr->data[0]; + u8 n_sb = hdr->data0; pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; __skb_pull(skb, sizeof(*hdr)); @@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) return -ECONNREFUSED; /* Parse sub-blocks */ - n_sb = hdr->data[4]; + n_sb = hdr->data[3]; while (n_sb > 0) { u8 type, buf[6], len = sizeof(buf); const u8 *data = pep_get_sb(skb, &type, &len, buf); @@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk) ph->utid = 0; ph->message_id = 
PNS_PIPE_REMOVE_REQ; ph->pipe_handle = pn->pipe_handle; - ph->data[0] = PAD; + ph->data0 = PAD; return pn_skb_send(sk, skb, NULL); } @@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp, peer_type = hdr->other_pep_type << 8; /* Parse sub-blocks (options) */ - n_sb = hdr->data[4]; + n_sb = hdr->data[3]; while (n_sb > 0) { u8 type, buf[1], len = sizeof(buf); const u8 *data = pep_get_sb(skb, &type, &len, buf); @@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) ph->utid = 0; if (pn->aligned) { ph->message_id = PNS_PIPE_ALIGNED_DATA; - ph->data[0] = 0; /* padding */ + ph->data0 = 0; /* padding */ } else ph->message_id = PNS_PIPE_DATA; ph->pipe_handle = pn->pipe_handle; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8af6c11d2482..faa1addf89b3 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -199,8 +199,7 @@ err3: err2: kfree(tname); err1: - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return err; } diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 64dba3708fce..cfceed28c333 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -189,8 +189,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - if (ret == ACT_P_CREATED) - tcf_idr_release(*a, bind); + tcf_idr_release(*a, bind); return -ENOMEM; } diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 8b43fe0130f7..3f943de9a2c9 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -377,7 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, return ret; release_tun_meta: - dst_release(&metadata->dst); + if (metadata) + dst_release(&metadata->dst); err_out: if (exists) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 75046ec72144..cc9d8133afcd 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -447,6 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, int nb = 0; int count = 1; int rc = NET_XMIT_SUCCESS; + int rc_drop = NET_XMIT_DROP; /* Do not fool qdisc_drop_all() */ skb->prev = NULL; @@ -486,6 +487,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, q->duplicate = 0; rootq->enqueue(skb2, rootq, to_free); q->duplicate = dupsave; + rc_drop = NET_XMIT_SUCCESS; } /* @@ -498,7 +500,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (skb_is_gso(skb)) { segs = netem_segment(skb, sch, to_free); if (!segs) - return NET_XMIT_DROP; + return rc_drop; } else { segs = skb; } @@ -521,8 +523,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, 1<<(prandom_u32() % 8); } - if (unlikely(sch->q.qlen >= sch->limit)) - return qdisc_drop_all(skb, sch, to_free); + if (unlikely(sch->q.qlen >= sch->limit)) { + qdisc_drop_all(skb, sch, to_free); + return rc_drop; + } qdisc_qstats_backlog_inc(sch, skb); diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 64bef313d436..5cb7c1ff97e9 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -192,7 +192,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, if (unlikely(!max_data)) { max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk), sctp_datachk_len(&asoc->stream)); - pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)", + pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum 
(%zu)", __func__, asoc, max_data); } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 033696e6f74f..ad158d311ffa 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport) /* When a data chunk is sent, reset the heartbeat interval. */ expires = jiffies + sctp_transport_timeout(transport); - if (time_before(transport->hb_timer.expires, expires) && + if ((time_before(transport->hb_timer.expires, expires) || + !timer_pending(&transport->hb_timer)) && !mod_timer(&transport->hb_timer, expires + prandom_u32_max(transport->rto))) sctp_transport_hold(transport); diff --git a/net/smc/smc.h b/net/smc/smc.h index 5721416d0605..adbdf195eb08 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */ } __aligned(8); enum smc_urg_state { - SMC_URG_VALID, /* data present */ - SMC_URG_NOTYET, /* data pending */ - SMC_URG_READ /* data was already read */ + SMC_URG_VALID = 1, /* data present */ + SMC_URG_NOTYET = 2, /* data pending */ + SMC_URG_READ = 3, /* data was already read */ }; struct smc_connection { diff --git a/net/socket.c b/net/socket.c index d80d87a395ea..320f51b22b19 100644 --- a/net/socket.c +++ b/net/socket.c @@ -577,6 +577,7 @@ static void __sock_release(struct socket *sock, struct inode *inode) if (inode) inode_lock(inode); sock->ops->release(sock); + sock->sk = NULL; if (inode) inode_unlock(inode); sock->ops = NULL; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1217c90a363b..70343ac448b1 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -379,16 +379,18 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout) #define tipc_wait_for_cond(sock_, timeo_, condition_) \ ({ \ + DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ struct sock *sk_; \ int rc_; \ \ while ((rc_ = !(condition_))) { \ - DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ + /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \ + smp_rmb(); \ sk_ = (sock_)->sk; \ rc_ = tipc_sk_sock_err((sock_), timeo_); \ if (rc_) \ break; \ - prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ + add_wait_queue(sk_sleep(sk_), &wait_); \ release_sock(sk_); \ *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ sched_annotate_sleep(); \ @@ -1677,7 +1679,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk) static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) { struct sock *sk = sock->sk; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); long timeo = *timeop; int err = sock_error(sk); @@ -1685,15 +1687,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) return err; for (;;) { - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { if (sk->sk_shutdown & RCV_SHUTDOWN) { err = -ENOTCONN; break; } + add_wait_queue(sk_sleep(sk), &wait); release_sock(sk); - timeo = schedule_timeout(timeo); + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + sched_annotate_sleep(); lock_sock(sk); + remove_wait_queue(sk_sleep(sk), &wait); } err = 0; if (!skb_queue_empty(&sk->sk_receive_queue)) @@ -1709,7 +1713,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) if (err) break; } - finish_wait(sk_sleep(sk), &wait); *timeop = timeo; return err; } @@ -1982,6 +1985,8 @@ static void tipc_sk_proto_rcv(struct sock *sk, return; case SOCK_WAKEUP: tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); + /* coupled with 
smp_rmb() in tipc_wait_for_cond() */ + smp_wmb(); tsk->cong_link_cnt--; wakeup = true; break; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 74d1eed7cbd4..a95d479caeea 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -890,7 +890,7 @@ retry: addr->hash ^= sk->sk_type; __unix_remove_socket(sk); - u->addr = addr; + smp_store_release(&u->addr, addr); __unix_insert_socket(&unix_socket_table[addr->hash], sk); spin_unlock(&unix_table_lock); err = 0; @@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = 0; __unix_remove_socket(sk); - u->addr = addr; + smp_store_release(&u->addr, addr); __unix_insert_socket(list, sk); out_unlock: @@ -1331,15 +1331,29 @@ restart: RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); otheru = unix_sk(other); - /* copy address information from listening to new sock*/ - if (otheru->addr) { - refcount_inc(&otheru->addr->refcnt); - newu->addr = otheru->addr; - } + /* copy address information from listening to new sock + * + * The contents of *(otheru->addr) and otheru->path + * are seen fully set up here, since we have found + * otheru in hash under unix_table_lock. Insertion + * into the hash chain we'd found it in had been done + * in an earlier critical area protected by unix_table_lock, + * the same one where we'd set *(otheru->addr) contents, + * as well as otheru->path and otheru->addr itself. + * + * Using smp_store_release() here to set newu->addr + * is enough to make those stores, as well as stores + * to newu->path visible to anyone who gets newu->addr + * by smp_load_acquire(). IOW, the same warranties + * as for unix_sock instances bound in unix_bind() or + * in unix_autobind(). + */ if (otheru->path.dentry) { path_get(&otheru->path); newu->path = otheru->path; } + refcount_inc(&otheru->addr->refcnt); + smp_store_release(&newu->addr, otheru->addr); /* Set credentials */ copy_peercred(sk, other); @@ -1453,7 +1467,7 @@ out: static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sock *sk = sock->sk; - struct unix_sock *u; + struct unix_address *addr; DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); int err = 0; @@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) sock_hold(sk); } - u = unix_sk(sk); - unix_state_lock(sk); - if (!u->addr) { + addr = smp_load_acquire(&unix_sk(sk)->addr); + if (!addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; err = sizeof(short); } else { - struct unix_address *addr = u->addr; - err = addr->len; memcpy(sunaddr, addr->name, addr->len); } - unix_state_unlock(sk); sock_put(sk); out: return err; @@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { - struct unix_sock *u = unix_sk(sk); + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); - if (u->addr) { - msg->msg_namelen = u->addr->len; - memcpy(msg->msg_name, u->addr->name, u->addr->len); + if (addr) { + msg->msg_namelen = addr->len; + memcpy(msg->msg_name, addr->name, addr->len); } } @@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk) if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; - unix_state_lock(sk); + if (!smp_load_acquire(&unix_sk(sk)->addr)) + return -ENOENT; + path = unix_sk(sk)->path; - if (!path.dentry) { - unix_state_unlock(sk); + if (!path.dentry) return -ENOENT; - } path_get(&path); - unix_state_unlock(sk); fd = 
get_unused_fd_flags(O_CLOEXEC); if (fd < 0) @@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), sock_i_ino(s)); - if (u->addr) { + if (u->addr) { // under unix_table_lock here int i, len; seq_putc(seq, ' '); diff --git a/net/unix/diag.c b/net/unix/diag.c index 384c84e83462..3183d9b8ab33 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -10,7 +10,8 @@ static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) { - struct unix_address *addr = unix_sk(sk)->addr; + /* might or might not have unix_table_lock */ + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); if (!addr) return 0; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index ec3a828672ef..eff31348e20b 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; int len, i, rc = 0; - if (!sock_flag(sk, SOCK_ZAPPED) || - addr_len != sizeof(struct sockaddr_x25) || + if (addr_len != sizeof(struct sockaddr_x25) || addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; @@ -699,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } lock_sock(sk); - x25_sk(sk)->source_addr = addr->sx25_addr; - x25_insert_socket(sk); - sock_reset_flag(sk, SOCK_ZAPPED); + if (sock_flag(sk, SOCK_ZAPPED)) { + x25_sk(sk)->source_addr = addr->sx25_addr; + x25_insert_socket(sk); + sock_reset_flag(sk, SOCK_ZAPPED); + } else { + rc = -EINVAL; + } release_sock(sk); SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); out: diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index d4de871e7d4d..37e1fe180769 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, return 0; err_unreg_umem: - xdp_clear_umem_at_qid(dev, queue_id); if (!force_zc) err = 0; /* fallback to copy mode */ + if (err) + xdp_clear_umem_at_qid(dev, queue_id); out_rtnl_unlock: rtnl_unlock(); return err; @@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem) if (!umem->pgs) return -ENOMEM; - down_write(¤t->mm->mmap_sem); - npgs = get_user_pages(umem->address, umem->npgs, - gup_flags, &umem->pgs[0], NULL); - up_write(¤t->mm->mmap_sem); + down_read(¤t->mm->mmap_sem); + npgs = get_user_pages_longterm(umem->address, umem->npgs, + gup_flags, &umem->pgs[0], NULL); + up_read(¤t->mm->mmap_sem); if (npgs != umem->npgs) { if (npgs >= 0) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index a03268454a27..85e4fe4f18cc 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock) xskq_destroy(xs->rx); xskq_destroy(xs->tx); - xdp_put_umem(xs->umem); sock_orphan(sk); sock->sk = NULL; @@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, if (!umem) return -EINVAL; + /* Matches the smp_wmb() in XDP_UMEM_REG */ + smp_rmb(); if (offset == XDP_UMEM_PGOFF_FILL_RING) q = READ_ONCE(umem->fq); else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) @@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, if (!q) return -EINVAL; + /* Matches the smp_wmb() in xsk_init_queue */ + smp_rmb(); qpg = virt_to_head_page(q->ring); if (size > (PAGE_SIZE << compound_order(qpg))) return -EINVAL; @@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = { .sendpage = sock_no_sendpage, }; +static void xsk_destruct(struct sock *sk) 
+{ + struct xdp_sock *xs = xdp_sk(sk); + + if (!sock_flag(sk, SOCK_DEAD)) + return; + + xdp_put_umem(xs->umem); + + sk_refcnt_debug_dec(sk); +} + static int xsk_create(struct net *net, struct socket *sock, int protocol, int kern) { @@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, sk->sk_family = PF_XDP; + sk->sk_destruct = xsk_destruct; + sk_refcnt_debug_inc(sk); + sock_set_flag(sk, SOCK_RCU_FREE); xs = xdp_sk(sk); diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 6be8c7df15bb..dbb3c1945b5c 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb) int ifindex; struct xfrm_if *xi; - if (!skb->dev) + if (!secpath_exists(skb) || !skb->dev) return NULL; - xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); + xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id); ifindex = skb->dev->ifindex; for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ba0a4048c846..8d1a898d0ba5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (ifcb) { xi = ifcb->decode_session(skb); - if (xi) + if (xi) { if_id = xi->p.if_id; + net = xi->net; + } } rcu_read_unlock(); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 23c92891758a..1bb971f46fc6 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_free); -static void xfrm_state_gc_destroy(struct xfrm_state *x) +static void ___xfrm_state_destroy(struct xfrm_state *x) { tasklet_hrtimer_cancel(&x->mtimer); del_timer_sync(&x->rtimer); @@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work) synchronize_rcu(); hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) - xfrm_state_gc_destroy(x); + ___xfrm_state_destroy(x); } static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) @@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) } EXPORT_SYMBOL(xfrm_state_alloc); -void __xfrm_state_destroy(struct xfrm_state *x) +void __xfrm_state_destroy(struct xfrm_state *x, bool sync) { WARN_ON(x->km.state != XFRM_STATE_DEAD); - spin_lock_bh(&xfrm_state_gc_lock); - hlist_add_head(&x->gclist, &xfrm_state_gc_list); - spin_unlock_bh(&xfrm_state_gc_lock); - schedule_work(&xfrm_state_gc_work); + if (sync) { + synchronize_rcu(); + ___xfrm_state_destroy(x); + } else { + spin_lock_bh(&xfrm_state_gc_lock); + hlist_add_head(&x->gclist, &xfrm_state_gc_list); + spin_unlock_bh(&xfrm_state_gc_lock); + schedule_work(&xfrm_state_gc_work); + } } EXPORT_SYMBOL(__xfrm_state_destroy); @@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool } #endif -int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) +int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync) { int i, err = 0, cnt = 0; @@ -730,7 +735,10 @@ restart: err = xfrm_state_delete(x); xfrm_audit_state_delete(x, err ? 
0 : 1, task_valid); - xfrm_state_put(x); + if (sync) + xfrm_state_put_sync(x); + else + xfrm_state_put(x); if (!err) cnt++; @@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) if (atomic_read(&t->tunnel_users) == 2) xfrm_state_delete(t); atomic_dec(&t->tunnel_users); - xfrm_state_put(t); + xfrm_state_put_sync(t); x->tunnel = NULL; } } @@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net) unsigned int sz; flush_work(&net->xfrm.state_hash_work); - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); flush_work(&xfrm_state_gc_work); + xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); WARN_ON(!list_empty(&net->xfrm.state_all)); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c6d26afcf89d..a131f9ff979e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct xfrm_usersa_flush *p = nlmsg_data(nlh); int err; - err = xfrm_state_flush(net, p->proto, true); + err = xfrm_state_flush(net, p->proto, true, false); if (err) { if (err == -ESRCH) /* empty table */ return 0; diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index ca7960adf5a3..b038aa9f5a70 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -1448,13 +1448,13 @@ static int __init mbochs_dev_init(void) { int ret = 0; - ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK, MBOCHS_NAME); + ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME); if (ret < 0) { pr_err("Error: failed to register mbochs_dev, err: %d\n", ret); return ret; } cdev_init(&mbochs_cdev, &vd_fops); - cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK); + cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1); pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt)); mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME); @@ -1483,7 +1483,7 @@ failed2: class_destroy(mbochs_class); failed1: cdev_del(&mbochs_cdev); - unregister_chrdev_region(mbochs_devt, MINORMASK); + unregister_chrdev_region(mbochs_devt, MINORMASK + 1); return ret; } @@ -1494,7 +1494,7 @@ static void __exit mbochs_dev_exit(void) device_unregister(&mbochs_dev); cdev_del(&mbochs_cdev); - unregister_chrdev_region(mbochs_devt, MINORMASK); + unregister_chrdev_region(mbochs_devt, MINORMASK + 1); class_destroy(mbochs_class); mbochs_class = NULL; } diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index 96e7969c473a..cc86bf6566e4 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -752,13 +752,13 @@ static int __init mdpy_dev_init(void) { int ret = 0; - ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK, MDPY_NAME); + ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME); if (ret < 0) { pr_err("Error: failed to register mdpy_dev, err: %d\n", ret); return ret; } cdev_init(&mdpy_cdev, &vd_fops); - cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK); + cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1); pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt)); mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME); @@ -787,7 +787,7 @@ failed2: class_destroy(mdpy_class); failed1: cdev_del(&mdpy_cdev); - unregister_chrdev_region(mdpy_devt, MINORMASK); + unregister_chrdev_region(mdpy_devt, MINORMASK + 1); return ret; } @@ -798,7 +798,7 @@ static void __exit mdpy_dev_exit(void) device_unregister(&mdpy_dev); cdev_del(&mdpy_cdev); - unregister_chrdev_region(mdpy_devt, MINORMASK); + unregister_chrdev_region(mdpy_devt, MINORMASK + 1); class_destroy(mdpy_class); mdpy_class = NULL; } diff --git 
a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index f6732aa16bb1..1c77c370c92f 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -156,15 +156,15 @@ static const struct file_operations vd_fops = { /* function prototypes */ -static int mtty_trigger_interrupt(uuid_le uuid); +static int mtty_trigger_interrupt(const guid_t *uuid); /* Helper functions */ -static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid) +static struct mdev_state *find_mdev_state_by_uuid(const guid_t *uuid) { struct mdev_state *mds; list_for_each_entry(mds, &mdev_devices_list, next) { - if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0) + if (guid_equal(mdev_uuid(mds->mdev), uuid)) return mds; } @@ -1032,7 +1032,7 @@ static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags, return ret; } -static int mtty_trigger_interrupt(uuid_le uuid) +static int mtty_trigger_interrupt(const guid_t *uuid) { int ret = -1; struct mdev_state *mdev_state; @@ -1442,7 +1442,8 @@ static int __init mtty_dev_init(void) idr_init(&mtty_dev.vd_idr); - ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME); + ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1, + MTTY_NAME); if (ret < 0) { pr_err("Error: failed to register mtty_dev, err:%d\n", ret); @@ -1450,7 +1451,7 @@ static int __init mtty_dev_init(void) } cdev_init(&mtty_dev.vd_cdev, &vd_fops); - cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK); + cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1); pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt)); @@ -1487,7 +1488,7 @@ failed2: failed1: cdev_del(&mtty_dev.vd_cdev); - unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK); + unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1); all_done: return ret; @@ -1501,7 +1502,7 @@ static void __exit mtty_dev_exit(void) device_unregister(&mtty_dev.dev); idr_destroy(&mtty_dev.vd_idr); cdev_del(&mtty_dev.vd_cdev); - unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK); + unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1); class_destroy(mtty_dev.vd_class); mtty_dev.vd_class = NULL; pr_info("mtty_dev: Unloaded!\n"); diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 25c259df8ffa..6deabedc67fc 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -26,7 +26,7 @@ else CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ $(call cc-param,asan-globals=1) \ $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ - $(call cc-param,asan-stack=1) \ + $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \ $(call cc-param,asan-use-after-scope=1) \ $(call cc-param,asan-instrument-allocas=1) endif diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 77cebad0474e..f75e7bda4889 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s) fprintf(stderr, "Read error or end of file.\n"); return -1; } - if (strlen(sym) > KSYM_NAME_LEN) { - fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n" + if (strlen(sym) >= KSYM_NAME_LEN) { + fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n" "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", sym, strlen(sym), KSYM_NAME_LEN); return -1; diff --git a/security/integrity/iint.c b/security/integrity/iint.c index 88f04b3380d4..423876fca8b4 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c @@ -200,7 +200,7 @@ int integrity_kernel_read(struct file *file, loff_t offset, return -EBADF; old_fs = get_fs(); - set_fs(get_ds()); + 
set_fs(KERNEL_DS); ret = __vfs_read(file, buf, count, &offset); set_fs(old_fs); diff --git a/security/keys/internal.h b/security/keys/internal.h index 479909b858c7..8f533c81aa8d 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm) return key_task_permission(key_ref, current_cred(), perm); } -/* - * Authorisation record for request_key(). - */ -struct request_key_auth { - struct key *target_key; - struct key *dest_keyring; - const struct cred *cred; - void *callout_info; - size_t callout_len; - pid_t pid; -} __randomize_layout; - extern struct key_type key_type_request_key_auth; extern struct key *request_key_auth_new(struct key *target, + const char *op, const void *callout_info, size_t callout_len, struct key *dest_keyring); diff --git a/security/keys/key.c b/security/keys/key.c index 44a80d6741a1..696f1c092c50 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { - if (user->qnkeys + 1 >= maxkeys || - user->qnbytes + quotalen >= maxbytes || + if (user->qnkeys + 1 > maxkeys || + user->qnbytes + quotalen > maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } @@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, key->gid = gid; key->perm = perm; key->restrict_link = restrict_link; + key->last_used_at = ktime_get_real_seconds(); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index e8093d025966..7bbe03593e58 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -25,6 +25,7 @@ #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> +#include <keys/request_key_auth-type.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 diff --git a/security/keys/keyring.c b/security/keys/keyring.c index eadebb92986a..f81372f53dd7 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring, BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); - if (ctx->index_key.description) - ctx->index_key.desc_len = strlen(ctx->index_key.description); - /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ @@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring, struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, + .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, diff --git a/security/keys/proc.c b/security/keys/proc.c index d2b802072693..78ac305d715e 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v) int rc; struct keyring_search_context ctx = { - .index_key.type = key->type, - .index_key.description = key->description, + .index_key = key->index_key, .cred = m->file->f_cred, .match_data.cmp = lookup_user_key_possessed, .match_data.raw_data = key, diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 02c77e928f68..0e0b9ccad2f8 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -19,6 +19,7 @@ #include <linux/security.h> #include <linux/user_namespace.h> #include <linux/uaccess.h> +#include <keys/request_key_auth-type.h> #include "internal.h" /* Session keyring create vs join semaphore */ diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 301f0e300dbd..7a0c6b666ff0 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -18,31 +18,30 @@ #include <linux/keyctl.h> #include <linux/slab.h> #include "internal.h" +#include <keys/request_key_auth-type.h> #define key_negative_timeout 60 /* default timeout on a negative key's existence */ /** * complete_request_key - Complete the construction of a key. - * @cons: The key construction record. + * @auth_key: The authorisation key. * @error: The success or failute of the construction. * * Complete the attempt to construct a key. The key will be negated * if an error is indicated. The authorisation key will be revoked * unconditionally. 
*/ -void complete_request_key(struct key_construction *cons, int error) +void complete_request_key(struct key *authkey, int error) { - kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); + struct request_key_auth *rka = get_request_key_auth(authkey); + struct key *key = rka->target_key; + + kenter("%d{%d},%d", authkey->serial, key->serial, error); if (error < 0) - key_negate_and_link(cons->key, key_negative_timeout, NULL, - cons->authkey); + key_negate_and_link(key, key_negative_timeout, NULL, authkey); else - key_revoke(cons->authkey); - - key_put(cons->key); - key_put(cons->authkey); - kfree(cons); + key_revoke(authkey); } EXPORT_SYMBOL(complete_request_key); @@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp, * Request userspace finish the construction of a key * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" */ -static int call_sbin_request_key(struct key_construction *cons, - const char *op, - void *aux) +static int call_sbin_request_key(struct key *authkey, void *aux) { static char const request_key[] = "/sbin/request-key"; + struct request_key_auth *rka = get_request_key_auth(authkey); const struct cred *cred = current_cred(); key_serial_t prkey, sskey; - struct key *key = cons->key, *authkey = cons->authkey, *keyring, - *session; + struct key *key = rka->target_key, *keyring, *session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; int ret, i; - kenter("{%d},{%d},%s", key->serial, authkey->serial, op); + kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op); ret = install_user_keyrings(); if (ret < 0) @@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons, /* set up the argument list */ i = 0; argv[i++] = (char *)request_key; - argv[i++] = (char *) op; + argv[i++] = (char *)rka->op; argv[i++] = key_str; argv[i++] = uid_str; argv[i++] = gid_str; @@ -191,7 +188,7 @@ error_link: key_put(keyring); error_alloc: - complete_request_key(cons, ret); + complete_request_key(authkey, ret); kleave(" = %d", ret); return ret; } @@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring) { - struct key_construction *cons; request_key_actor_t actor; struct key *authkey; int ret; kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); - cons = kmalloc(sizeof(*cons), GFP_KERNEL); - if (!cons) - return -ENOMEM; - /* allocate an authorisation key */ - authkey = request_key_auth_new(key, callout_info, callout_len, + authkey = request_key_auth_new(key, "create", callout_info, callout_len, dest_keyring); - if (IS_ERR(authkey)) { - kfree(cons); - ret = PTR_ERR(authkey); - authkey = NULL; - } else { - cons->authkey = key_get(authkey); - cons->key = key_get(key); + if (IS_ERR(authkey)) + return PTR_ERR(authkey); - /* make the call */ - actor = call_sbin_request_key; - if (key->type->request_key) - actor = key->type->request_key; + /* Make the call */ + actor = call_sbin_request_key; + if (key->type->request_key) + actor = key->type->request_key; - ret = actor(cons, "create", aux); + ret = actor(authkey, aux); - /* check that the actor called complete_request_key() prior to - * returning an error */ - WARN_ON(ret < 0 && - !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); - key_put(authkey); - } + /* check that the actor called complete_request_key() prior to + * returning an error */ + WARN_ON(ret < 0 && + 
!test_bit(KEY_FLAG_REVOKED, &authkey->flags)); + key_put(authkey); kleave(" = %d", ret); return ret; } @@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring) if (cred->request_key_auth) { authkey = cred->request_key_auth; down_read(&authkey->sem); - rka = authkey->payload.data[0]; + rka = get_request_key_auth(authkey); if (!test_bit(KEY_FLAG_REVOKED, &authkey->flags)) dest_keyring = @@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type, struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, + .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 87ea2f54dedc..bda6201c6c45 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -17,7 +17,7 @@ #include <linux/slab.h> #include <linux/uaccess.h> #include "internal.h" -#include <keys/user-type.h> +#include <keys/request_key_auth-type.h> static int request_key_auth_preparse(struct key_preparsed_payload *); static void request_key_auth_free_preparse(struct key_preparsed_payload *); @@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key, static void request_key_auth_describe(const struct key *key, struct seq_file *m) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); seq_puts(m, "key:"); seq_puts(m, key->description); @@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key, static long request_key_auth_read(const struct key *key, char __user *buffer, size_t buflen) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); size_t datalen; long ret; @@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key, */ static void request_key_auth_revoke(struct key *key) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); kenter("{%d}", key->serial); @@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka) */ static void request_key_auth_destroy(struct key *key) { - struct request_key_auth *rka = key->payload.data[0]; + struct request_key_auth *rka = get_request_key_auth(key); kenter("{%d}", key->serial); @@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key) * Create an authorisation token for /sbin/request-key or whoever to gain * access to the caller's security data. 
*/ -struct key *request_key_auth_new(struct key *target, const void *callout_info, - size_t callout_len, struct key *dest_keyring) +struct key *request_key_auth_new(struct key *target, const char *op, + const void *callout_info, size_t callout_len, + struct key *dest_keyring) { struct request_key_auth *rka, *irka; const struct cred *cred = current->cred; @@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, if (!rka->callout_info) goto error_free_rka; rka->callout_len = callout_len; + strlcpy(rka->op, op, sizeof(rka->op)); /* see if the calling process is already servicing the key request of * another process */ @@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) struct key *authkey; key_ref_t authkey_ref; - sprintf(description, "%x", target_id); + ctx.index_key.desc_len = sprintf(description, "%x", target_id); authkey_ref = search_process_keyrings(&ctx); diff --git a/security/lsm_audit.c b/security/lsm_audit.c index f84001019356..33028c098ef3 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, if (a->u.net->sk) { struct sock *sk = a->u.net->sk; struct unix_sock *u; + struct unix_address *addr; int len = 0; char *p = NULL; @@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab, #endif case AF_UNIX: u = unix_sk(sk); + addr = smp_load_acquire(&u->addr); + if (!addr) + break; if (u->path.dentry) { audit_log_d_path(ab, " path=", &u->path); break; } - if (!u->addr) - break; - len = u->addr->len-sizeof(short); - p = &u->addr->name->sun_path[0]; + len = addr->len-sizeof(short); + p = &addr->name->sun_path[0]; audit_log_format(ab, " path="); if (*p) audit_log_untrustedstring(ab, p); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6df758adff84..1ffa36e987b4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1855,6 +1855,8 @@ enum { ALC887_FIXUP_BASS_CHMAP, ALC1220_FIXUP_GB_DUAL_CODECS, ALC1220_FIXUP_CLEVO_P950, + ALC1220_FIXUP_SYSTEM76_ORYP5, + ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, snd_hda_override_conn_list(codec, 0x1b, 1, conn1); } +static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action); + +static void alc1220_fixup_system76_oryp5(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + alc1220_fixup_clevo_p950(codec, fix, action); + alc_fixup_headset_mode_no_hp_mic(codec, fix, action); +} + static const struct hda_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { .type = HDA_FIXUP_PINS, @@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_clevo_p950, }, + [ALC1220_FIXUP_SYSTEM76_ORYP5] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc1220_fixup_system76_oryp5, + }, + [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + {} + }, + .chained = true, + .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5, + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 
0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), + SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), @@ -5632,6 +5660,7 @@ enum { ALC294_FIXUP_ASUS_SPK, ALC225_FIXUP_HEADSET_JACK, ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, }; static const struct hda_fixup alc269_fixups[] = { @@ -6587,6 +6616,17 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, + [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Disable PCBEEP-IN passthrough */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7272,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60130}, {0x19, 0x03a11020}, {0x21, 0x0321101f}), - SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, {0x12, 0x90a60130}, {0x14, 0x90170110}, {0x19, 0x04a11040}, diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 37e001cf9cd1..3fe34417ec89 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv) conf_idx = 0; node = of_get_child_by_name(top, PREFIX "dai-link"); if (!node) { - node = dev->of_node; + node = of_node_get(top); loop = 0; } diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index ce00fe2f6aae..d4bde4834ce5 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = get_other_dai(i2s); int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; u32 mod, tmp = 0; unsigned long flags; @@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any * clock configuration assigned in DT is not overwritten. 
*/ - if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) + if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL && + other->clk_data.clks == NULL) i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 0, SND_SOC_CLOCK_IN); break; @@ -699,6 +701,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); + struct i2s_dai *other = get_other_dai(i2s); u32 mod, mask = 0, val = 0; struct clk *rclksrc; unsigned long flags; @@ -784,6 +787,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, i2s->frmclk = params_rate(params); rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; + if (!rclksrc || IS_ERR(rclksrc)) + rclksrc = other->clk_table[CLK_I2S_RCLK_SRC]; + if (rclksrc && !IS_ERR(rclksrc)) i2s->rclk_srcrate = clk_get_rate(rclksrc); diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index fc79ec6927e3..731b963b6995 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -2487,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) { struct soc_tplg tplg; + int ret; /* setup parsing context */ memset(&tplg, 0, sizeof(tplg)); @@ -2500,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, tplg.bytes_ext_ops = ops->bytes_ext_ops; tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; - return soc_tplg_load(&tplg); + ret = soc_tplg_load(&tplg); + /* free the created components if fail to load topology */ + if (ret) + snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL); + + return ret; } EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 147e34cfceb7..02d7c871862a 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -474,6 +474,16 @@ static void test_lpm_delete(void) assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && errno == ENOENT); + key->prefixlen = 30; // unused prefix so far + inet_pton(AF_INET, "192.255.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == -1 && + errno == ENOENT); + + key->prefixlen = 16; // same prefix as the root node + inet_pton(AF_INET, "192.255.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == -1 && + errno == ENOENT); + /* assert initial lookup */ key->prefixlen = 32; inet_pton(AF_INET, "192.168.0.1", key->data); diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 802b4af18729..1080ff55a788 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -388,6 +388,7 @@ fib_carrier_unicast_test() set -e $IP link set dev dummy0 carrier off + sleep 1 set +e echo " Carrier down" diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index e2c94e47707c..912b2dc50be3 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -103,6 +103,15 @@ # and check that configured MTU is used on link creation and changes, and # that MTU is properly calculated instead when MTU is not configured from # userspace +# +# - cleanup_ipv4_exception +# Similar to pmtu_ipv4_vxlan4_exception, but explicitly generate PMTU +# exceptions on multiple CPUs and check that the veth device tear-down +# happens in a timely manner +# +# - cleanup_ipv6_exception +# Same as above, but use IPv6 transport from A to B + # 
Kselftest framework requirement - SKIP code is 4. ksft_skip=4 @@ -135,7 +144,9 @@ tests=" pmtu_vti6_default_mtu vti6: default MTU assignment pmtu_vti4_link_add_mtu vti4: MTU setting on link creation pmtu_vti6_link_add_mtu vti6: MTU setting on link creation - pmtu_vti6_link_change_mtu vti6: MTU changes on link changes" + pmtu_vti6_link_change_mtu vti6: MTU changes on link changes + cleanup_ipv4_exception ipv4: cleanup of cached exceptions + cleanup_ipv6_exception ipv6: cleanup of cached exceptions" NS_A="ns-$(mktemp -u XXXXXX)" NS_B="ns-$(mktemp -u XXXXXX)" @@ -263,8 +274,6 @@ setup_fou_or_gue() { ${ns_a} ip link set ${encap}_a up ${ns_b} ip link set ${encap}_b up - - sleep 1 } setup_fou44() { @@ -302,6 +311,10 @@ setup_gue66() { setup_namespaces() { for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do ip netns add ${n} || return 1 + + # Disable DAD, so that we don't have to wait to use the + # configured IPv6 addresses + ip netns exec ${n} sysctl -q net/ipv6/conf/default/accept_dad=0 done } @@ -337,8 +350,6 @@ setup_vti() { ${ns_a} ip link set vti${proto}_a up ${ns_b} ip link set vti${proto}_b up - - sleep 1 } setup_vti4() { @@ -375,8 +386,6 @@ setup_vxlan_or_geneve() { ${ns_a} ip link set ${type}_a up ${ns_b} ip link set ${type}_b up - - sleep 1 } setup_geneve4() { @@ -588,8 +597,8 @@ test_pmtu_ipvX() { mtu "${ns_b}" veth_B-R2 1500 # Create route exceptions - ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null - ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null # Check that exceptions have been created with the correct PMTU pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" @@ -621,7 +630,7 @@ test_pmtu_ipvX() { # Decrease remote MTU on path via R2, get new exception mtu "${ns_r2}" veth_R2-B 400 mtu "${ns_b}" veth_B-R2 400 - ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1 @@ -638,7 +647,7 @@ test_pmtu_ipvX() { check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1 # Get new exception - ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1 } @@ -687,7 +696,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() { mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000)) mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000)) - ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null # Check that exception was created pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})" @@ -767,7 +776,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() { mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000)) mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000)) - ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null + ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null # Check that exception was created pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})" @@ -825,13 +834,13 @@ test_pmtu_vti4_exception() { # Send DF 
packet without exceeding link layer MTU, check that no # exception is created - ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null + ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})" check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1 # Now exceed link layer MTU by one byte, check that exception is created # with the right PMTU value - ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null + ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})" check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))" } @@ -847,7 +856,7 @@ test_pmtu_vti6_exception() { mtu "${ns_b}" veth_b 4000 mtu "${ns_a}" vti6_a 5000 mtu "${ns_b}" vti6_b 5000 - ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null + ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null # Check that exception was created pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})" @@ -1008,6 +1017,61 @@ test_pmtu_vti6_link_change_mtu() { return ${fail} } +check_command() { + cmd=${1} + + if ! which ${cmd} > /dev/null 2>&1; then + err " missing required command: '${cmd}'" + return 1 + fi + return 0 +} + +test_cleanup_vxlanX_exception() { + outer="${1}" + encap="vxlan" + ll_mtu=4000 + + check_command taskset || return 2 + cpu_list=$(grep -m 2 processor /proc/cpuinfo | cut -d ' ' -f 2) + + setup namespaces routing ${encap}${outer} || return 2 + trace "${ns_a}" ${encap}_a "${ns_b}" ${encap}_b \ + "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B + + # Create route exception by exceeding link layer MTU + mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000)) + mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000)) + mtu "${ns_b}" veth_B-R1 ${ll_mtu} + mtu "${ns_r1}" veth_R1-B ${ll_mtu} + + mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000)) + mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000)) + + # Fill exception cache for multiple CPUs (2) + # we can always use inner IPv4 for that + for cpu in ${cpu_list}; do + taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null + done + + ${ns_a} ip link del dev veth_A-R1 & + iplink_pid=$! + sleep 1 + if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then + err " can't delete veth device in a timely manner, PMTU dst likely leaked" + return 1 + fi +} + +test_cleanup_ipv6_exception() { + test_cleanup_vxlanX_exception 6 +} + +test_cleanup_ipv4_exception() { + test_cleanup_vxlanX_exception 4 +} + usage() { echo echo "$0 [OPTIONS] [TEST]..." diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh index aeac53a99aeb..ac2a30be9b32 100755 --- a/tools/testing/selftests/net/udpgro.sh +++ b/tools/testing/selftests/net/udpgro.sh @@ -37,7 +37,7 @@ run_one() { cfg_veth - ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \ + ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \ echo "ok" || \ echo "failed" & @@ -81,7 +81,7 @@ run_one_nat() { # will land on the 'plain' one ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 & pid=$! 
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \ + ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \ echo "ok" || \ echo "failed"& @@ -99,8 +99,8 @@ run_one_2sock() { cfg_veth - ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 & - ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \ + ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 & + ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \ echo "ok" || \ echo "failed" & diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c index 0c960f673324..db3d4a8b5a4c 100644 --- a/tools/testing/selftests/net/udpgso_bench_rx.c +++ b/tools/testing/selftests/net/udpgso_bench_rx.c @@ -45,6 +45,8 @@ static int cfg_alen = sizeof(struct sockaddr_in6); static int cfg_expected_pkt_nr; static int cfg_expected_pkt_len; static int cfg_expected_gso_size; +static int cfg_connect_timeout_ms; +static int cfg_rcv_timeout_ms; static struct sockaddr_storage cfg_bind_addr; static bool interrupted; @@ -87,7 +89,7 @@ static unsigned long gettimeofday_ms(void) return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); } -static void do_poll(int fd) +static void do_poll(int fd, int timeout_ms) { struct pollfd pfd; int ret; @@ -102,8 +104,16 @@ static void do_poll(int fd) break; if (ret == -1) error(1, errno, "poll"); - if (ret == 0) - continue; + if (ret == 0) { + if (!timeout_ms) + continue; + + timeout_ms -= 10; + if (timeout_ms <= 0) { + interrupted = true; + break; + } + } if (pfd.revents != POLLIN) error(1, errno, "poll: 0x%x expected 0x%x\n", pfd.revents, POLLIN); @@ -134,7 +144,7 @@ static int do_socket(bool do_tcp) if (listen(accept_fd, 1)) error(1, errno, "listen"); - do_poll(accept_fd); + do_poll(accept_fd, cfg_connect_timeout_ms); if (interrupted) exit(0); @@ -273,7 +283,9 @@ static void do_flush_udp(int fd) static void usage(const char *filepath) { - error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath); + error(1, 0, "Usage: %s [-C connect_timeout] [-Grtv] [-b addr] [-p port]" + " [-l pktlen] [-n packetnr] [-R rcv_timeout] [-S gsosize]", + filepath); } static void parse_opts(int argc, char **argv) @@ -282,7 +294,7 @@ static void parse_opts(int argc, char **argv) /* bind to any by default */ setup_sockaddr(PF_INET6, "::", &cfg_bind_addr); - while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) { + while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) { switch (c) { case '4': cfg_family = PF_INET; @@ -292,6 +304,9 @@ static void parse_opts(int argc, char **argv) case 'b': setup_sockaddr(cfg_family, optarg, &cfg_bind_addr); break; + case 'C': + cfg_connect_timeout_ms = strtoul(optarg, NULL, 0); + break; case 'G': cfg_gro_segment = true; break; @@ -307,6 +322,9 @@ static void parse_opts(int argc, char **argv) case 'r': cfg_read_all = true; break; + case 'R': + cfg_rcv_timeout_ms = strtoul(optarg, NULL, 0); + break; case 'S': cfg_expected_gso_size = strtol(optarg, NULL, 0); break; @@ -329,8 +347,9 @@ static void parse_opts(int argc, char **argv) static void do_recv(void) { + int timeout_ms = cfg_tcp ? 
cfg_rcv_timeout_ms : cfg_connect_timeout_ms; unsigned long tnow, treport; - int fd, loop = 0; + int fd; fd = do_socket(cfg_tcp); @@ -342,12 +361,7 @@ static void do_recv(void) treport = gettimeofday_ms() + 1000; do { - /* force termination after the second poll(); this cope both - * with sender slower than receiver and missing packet errors - */ - if (cfg_expected_pkt_nr && loop++) - interrupted = true; - do_poll(fd); + do_poll(fd, timeout_ms); if (cfg_tcp) do_flush_tcp(fd); @@ -365,6 +379,8 @@ static void do_recv(void) treport = tnow + 1000; } + timeout_ms = cfg_rcv_timeout_ms; + } while (!interrupted); if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr)) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 585845203db8..076bc38963bf 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4044,7 +4044,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) } add_uevent_var(env, "PID=%d", kvm->userspace_pid); - if (kvm->debugfs_dentry) { + if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) { char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL); if (p) { |
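
The lsm_audit.c hunk above reads u->addr once with smp_load_acquire() and then works only on the local copy, so the AF_UNIX address can no longer be freed or replaced between the NULL check and the audit output. A rough userspace analogue of that read-once-with-acquire pattern, using C11 atomics in place of the kernel primitive; struct peer_addr, shared_addr and log_peer_path() are invented names for illustration, not kernel symbols:

#include <stdatomic.h>
#include <stdio.h>

struct peer_addr {                       /* stand-in for the published address */
	int  len;
	char path[64];
};

static _Atomic(struct peer_addr *) shared_addr;   /* written by another thread */

static void log_peer_path(void)
{
	/* One acquire load; every later use goes through the local copy, so a
	 * concurrent writer clearing or swapping shared_addr cannot pull the
	 * object out from under us between the NULL check and the print. */
	struct peer_addr *addr = atomic_load_explicit(&shared_addr,
						      memory_order_acquire);

	if (!addr)
		return;
	printf(" path=%.*s\n", addr->len, addr->path);
}

int main(void)
{
	static struct peer_addr a = { .len = 9, .path = "/tmp/sock" };

	/* Publish with a release store so a reader that acquires shared_addr
	 * also sees len and path fully initialized. */
	atomic_store_explicit(&shared_addr, &a, memory_order_release);
	log_peer_path();
	return 0;
}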
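
The udpgso_bench_rx.c hunks give do_poll() a millisecond budget (-C for the pre-connect wait, -R for receive) so the receiver exits cleanly instead of spinning forever when the sender never shows up, which is what lets udpgro.sh pass explicit -C/-R values. A minimal standalone sketch of that timed-poll loop, assuming the 10 ms poll slice the timeout accounting implies; wait_readable() and the main() below are illustrative, not the selftest's code:

#include <poll.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll fd in 10 ms slices; give up once the budget is spent.  A budget of 0
 * keeps the old "wait forever" behaviour. */
static bool wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		int ret = poll(&pfd, 1, 10);

		if (ret > 0)
			return true;            /* readable (or error/hup reported) */
		if (ret < 0)
			return false;           /* poll() itself failed */
		if (timeout_ms) {
			timeout_ms -= 10;
			if (timeout_ms <= 0)
				return false;   /* budget exhausted, stop waiting */
		}
	}
}

int main(void)
{
	/* stdin with a 1000 ms budget, roughly what "-C 1000" asks of the
	 * receiver before the sender has connected. */
	printf("readable: %d\n", wait_readable(STDIN_FILENO, 1000));
	return 0;
}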
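
The final kvm_main.c hunk tightens the debugfs check from a plain NULL test to !IS_ERR_OR_NULL(), presumably because kvm->debugfs_dentry can hold an error pointer rather than NULL when the per-VM debugfs directory was never created. A small userspace mock of the kernel's ERR_PTR convention (the real helpers live in <linux/err.h>); MAX_ERRNO, ERR_PTR() and IS_ERR_OR_NULL() are re-implemented here only to show why a bare NULL check is not enough:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Kernel convention: the top 4095 "addresses" encode -errno, so a pointer
 * can be NULL, an error value, or valid. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }

static inline bool IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	int   backing = 0;
	void *ok   = &backing;
	void *bad  = ERR_PTR(-12);        /* e.g. -ENOMEM */
	void *none = NULL;

	/* Prints "0 1 1": only the real pointer passes, while both the error
	 * pointer and NULL are rejected, which a bare NULL check would miss. */
	printf("%d %d %d\n", IS_ERR_OR_NULL(ok), IS_ERR_OR_NULL(bad),
	       IS_ERR_OR_NULL(none));
	return 0;
}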