Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/power/runtime.c | 2
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 2
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 99
-rw-r--r--  drivers/clk/at91/at91sam9x5.c | 5
-rw-r--r--  drivers/clk/at91/sama5d2.c | 4
-rw-r--r--  drivers/clk/at91/sama5d4.c | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 2
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 20
-rw-r--r--  drivers/gpio/gpio-pxa.c | 1
-rw-r--r--  drivers/gpio/gpiolib-of.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 1
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 15
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 10
-rw-r--r--  drivers/iommu/dmar.c | 2
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c | 4
-rw-r--r--  drivers/mailbox/mailbox.c | 1
-rw-r--r--  drivers/mmc/core/Makefile | 2
-rw-r--r--  drivers/mmc/core/block.c | 27
-rw-r--r--  drivers/mmc/core/core.c | 338
-rw-r--r--  drivers/mmc/core/core.h | 1
-rw-r--r--  drivers/mmc/core/host.c | 48
-rw-r--r--  drivers/mmc/core/mmc.c | 10
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 2
-rw-r--r--  drivers/mmc/core/queue.c | 9
-rw-r--r--  drivers/mmc/core/regulator.c | 260
-rw-r--r--  drivers/mmc/core/sd.c | 20
-rw-r--r--  drivers/mmc/core/sd_ops.c | 33
-rw-r--r--  drivers/mmc/core/sd_ops.h | 3
-rw-r--r--  drivers/mmc/core/sdio.c | 9
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 3
-rw-r--r--  drivers/mmc/core/sdio_io.c | 29
-rw-r--r--  drivers/mmc/core/sdio_ops.h | 1
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 9
-rw-r--r--  drivers/mmc/host/Kconfig | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 8
-rw-r--r--  drivers/mmc/host/bcm2835.c | 23
-rw-r--r--  drivers/mmc/host/cb710-mmc.c | 42
-rw-r--r--  drivers/mmc/host/cqhci.c | 13
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 2
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 73
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 3
-rw-r--r--  drivers/mmc/host/mmci.c | 27
-rw-r--r--  drivers/mmc/host/mmci.h | 1
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 2
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 2
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c | 22
-rw-r--r--  drivers/mmc/host/omap.c | 2
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/mmc/host/renesas_sdhi.h | 2
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 19
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 1
-rw-r--r--  drivers/mmc/host/s3cmci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-bcm-kona.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-brcmstb.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 310
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 10
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c | 140
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 6
-rw-r--r--  drivers/mmc/host/sdhci-pxav2.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 286
-rw-r--r--  drivers/mmc/host/sdhci-xenon-phy.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 27
-rw-r--r--  drivers/mmc/host/sdhci.h | 6
-rw-r--r--  drivers/mmc/host/sdhci_am654.c | 2
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 2
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 5
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 20
-rw-r--r--  drivers/mmc/host/wmt-sdmmc.c | 1
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 3
-rw-r--r--  drivers/mtd/chips/gen_probe.c | 2
-rw-r--r--  drivers/mtd/devices/docg3.c | 7
-rw-r--r--  drivers/mtd/devices/m25p80.c | 9
-rw-r--r--  drivers/mtd/devices/mtdram.c | 2
-rw-r--r--  drivers/mtd/devices/powernv_flash.c | 2
-rw-r--r--  drivers/mtd/lpddr/qinfo_probe.c | 4
-rw-r--r--  drivers/mtd/mtdcore.c | 84
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 17
-rw-r--r--  drivers/mtd/nand/raw/Makefile | 2
-rw-r--r--  drivers/mtd/nand/raw/atmel/pmecc.c | 21
-rw-r--r--  drivers/mtd/nand/raw/denali.c | 42
-rw-r--r--  drivers/mtd/nand/raw/denali.h | 1
-rw-r--r--  drivers/mtd/nand/raw/denali_dt.c | 27
-rw-r--r--  drivers/mtd/nand/raw/fsmc_nand.c | 16
-rw-r--r--  drivers/mtd/nand/raw/jz4780_bch.c | 9
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 5
-rw-r--r--  drivers/mtd/nand/raw/meson_nand.c | 1464
-rw-r--r--  drivers/mtd/nand/raw/mtk_ecc.c | 8
-rw-r--r--  drivers/mtd/nand/raw/mtk_nand.c | 3
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 159
-rw-r--r--  drivers/mtd/nand/raw/nand_legacy.c | 3
-rw-r--r--  drivers/mtd/nand/raw/omap2.c | 20
-rw-r--r--  drivers/mtd/nand/raw/r852.c | 3
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2073
-rw-r--r--  drivers/mtd/nand/raw/sunxi_nand.c | 732
-rw-r--r--  drivers/mtd/nand/raw/tmio_nand.c | 21
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c | 83
-rw-r--r--  drivers/mtd/nand/spi/macronix.c | 8
-rw-r--r--  drivers/mtd/nand/spi/toshiba.c | 79
-rw-r--r--  drivers/mtd/spi-nor/Kconfig | 25
-rw-r--r--  drivers/mtd/spi-nor/Makefile | 3
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c | 74
-rw-r--r--  drivers/mtd/spi-nor/fsl-quadspi.c | 1224
-rw-r--r--  drivers/mtd/spi-nor/mtk-quadspi.c | 3
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 35
-rw-r--r--  drivers/net/bonding/bond_main.c | 35
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 90
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 3
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 13
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 149
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 128
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 12
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r--  drivers/net/geneve.c | 11
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 22
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 4
-rw-r--r--  drivers/net/phy/dp83867.c | 3
-rw-r--r--  drivers/net/phy/marvell10g.c | 6
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/micrel.c | 13
-rw-r--r--  drivers/net/phy/phylink.c | 4
-rw-r--r--  drivers/net/phy/realtek.c | 7
-rw-r--r--  drivers/net/phy/xilinx_gmii2rgmii.c | 5
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 4
-rw-r--r--  drivers/net/usb/r8152.c | 5
-rw-r--r--  drivers/net/vrf.c | 3
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 46
-rw-r--r--  drivers/net/xen-netback/hash.c | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 10
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson8b.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcs404.c | 2
-rw-r--r--  drivers/regulator/88pm8607.c | 38
-rw-r--r--  drivers/regulator/Kconfig | 27
-rw-r--r--  drivers/regulator/Makefile | 2
-rw-r--r--  drivers/regulator/act8865-regulator.c | 5
-rw-r--r--  drivers/regulator/act8945a-regulator.c | 11
-rw-r--r--  drivers/regulator/arizona-ldo1.c | 56
-rw-r--r--  drivers/regulator/as3722-regulator.c | 2
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 27
-rw-r--r--  drivers/regulator/bcm590xx-regulator.c | 4
-rw-r--r--  drivers/regulator/bd70528-regulator.c | 289
-rw-r--r--  drivers/regulator/bd718x7-regulator.c | 215
-rw-r--r--  drivers/regulator/bd9571mwv-regulator.c | 8
-rw-r--r--  drivers/regulator/core.c | 88
-rw-r--r--  drivers/regulator/cpcap-regulator.c | 106
-rw-r--r--  drivers/regulator/da9052-regulator.c | 64
-rw-r--r--  drivers/regulator/da9055-regulator.c | 46
-rw-r--r--  drivers/regulator/da9062-regulator.c | 37
-rw-r--r--  drivers/regulator/da9063-regulator.c | 37
-rw-r--r--  drivers/regulator/da9210-regulator.c | 53
-rw-r--r--  drivers/regulator/fan53555.c | 109
-rw-r--r--  drivers/regulator/fixed.c | 35
-rw-r--r--  drivers/regulator/gpio-regulator.c | 194
-rw-r--r--  drivers/regulator/helpers.c | 125
-rw-r--r--  drivers/regulator/hi655x-regulator.c | 1
-rw-r--r--  drivers/regulator/isl6271a-regulator.c | 13
-rw-r--r--  drivers/regulator/lm363x-regulator.c | 6
-rw-r--r--  drivers/regulator/lochnagar-regulator.c | 7
-rw-r--r--  drivers/regulator/lp3971.c | 4
-rw-r--r--  drivers/regulator/lp3972.c | 4
-rw-r--r--  drivers/regulator/lp872x.c | 82
-rw-r--r--  drivers/regulator/lp873x-regulator.c | 51
-rw-r--r--  drivers/regulator/lp8755.c | 6
-rw-r--r--  drivers/regulator/lp87565-regulator.c | 4
-rw-r--r--  drivers/regulator/lp8788-buck.c | 40
-rw-r--r--  drivers/regulator/lp8788-ldo.c | 4
-rw-r--r--  drivers/regulator/ltc3676.c | 65
-rw-r--r--  drivers/regulator/max14577-regulator.c | 1
-rw-r--r--  drivers/regulator/max77620-regulator.c | 12
-rw-r--r--  drivers/regulator/max77650-regulator.c | 498
-rw-r--r--  drivers/regulator/max77802-regulator.c | 6
-rw-r--r--  drivers/regulator/mc13783-regulator.c | 82
-rw-r--r--  drivers/regulator/mc13892-regulator.c | 72
-rw-r--r--  drivers/regulator/mc13xxx-regulator-core.c | 4
-rw-r--r--  drivers/regulator/mc13xxx.h | 24
-rw-r--r--  drivers/regulator/mcp16502.c | 2
-rw-r--r--  drivers/regulator/mt6311-regulator.c | 10
-rw-r--r--  drivers/regulator/of_regulator.c | 4
-rw-r--r--  drivers/regulator/palmas-regulator.c | 2
-rw-r--r--  drivers/regulator/pv88060-regulator.c | 62
-rw-r--r--  drivers/regulator/pv88080-regulator.c | 55
-rw-r--r--  drivers/regulator/pv88090-regulator.c | 53
-rw-r--r--  drivers/regulator/pwm-regulator.c | 17
-rw-r--r--  drivers/regulator/qcom_smd-regulator.c | 104
-rw-r--r--  drivers/regulator/rk808-regulator.c | 64
-rw-r--r--  drivers/regulator/rt5033-regulator.c | 4
-rw-r--r--  drivers/regulator/s2mpa01.c | 10
-rw-r--r--  drivers/regulator/s2mps11.c | 6
-rw-r--r--  drivers/regulator/s5m8767.c | 8
-rw-r--r--  drivers/regulator/stm32-vrefbuf.c | 121
-rw-r--r--  drivers/regulator/stpmic1_regulator.c | 300
-rw-r--r--  drivers/regulator/tps65218-regulator.c | 23
-rw-r--r--  drivers/regulator/twl-regulator.c | 7
-rw-r--r--  drivers/regulator/twl6030-regulator.c | 88
-rw-r--r--  drivers/regulator/uniphier-regulator.c | 8
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 85
-rw-r--r--  drivers/scsi/3w-9xxx.c | 14
-rw-r--r--  drivers/scsi/3w-sas.c | 12
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 8
-rw-r--r--  drivers/scsi/bfa/bfad.c | 18
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 7
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 8
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 8
-rw-r--r--  drivers/scsi/hptiop.c | 10
-rw-r--r--  drivers/scsi/libiscsi.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 19
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/sd_zbc.c | 8
-rw-r--r--  drivers/spi/Kconfig | 36
-rw-r--r--  drivers/spi/Makefile | 3
-rw-r--r--  drivers/spi/atmel-quadspi.c | 270
-rw-r--r--  drivers/spi/spi-ath79.c | 114
-rw-r--r--  drivers/spi/spi-atmel.c | 102
-rw-r--r--  drivers/spi/spi-bcm2835aux.c | 2
-rw-r--r--  drivers/spi/spi-bitbang.c | 13
-rw-r--r--  drivers/spi/spi-cadence.c | 84
-rw-r--r--  drivers/spi/spi-clps711x.c | 23
-rw-r--r--  drivers/spi/spi-davinci.c | 54
-rw-r--r--  drivers/spi/spi-dw-mmio.c | 22
-rw-r--r--  drivers/spi/spi-dw.c | 54
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 42
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c | 92
-rw-r--r--  drivers/spi/spi-fsl-qspi.c | 966
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 56
-rw-r--r--  drivers/spi/spi-gpio.c | 4
-rw-r--r--  drivers/spi/spi-mem.c | 72
-rw-r--r--  drivers/spi/spi-mxs.c | 5
-rw-r--r--  drivers/spi/spi-npcm-pspi.c | 3
-rw-r--r--  drivers/spi/spi-nxp-fspi.c | 1106
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 4
-rw-r--r--  drivers/spi/spi-pl022.c | 30
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 58
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c | 4
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 157
-rw-r--r--  drivers/spi/spi-pxa2xx.h | 4
-rw-r--r--  drivers/spi/spi-rspi.c | 170
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 39
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 184
-rw-r--r--  drivers/spi/spi-sifive.c | 448
-rw-r--r--  drivers/spi/spi-sprd.c | 354
-rw-r--r--  drivers/spi/spi-stm32.c | 1403
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 6
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 6
-rw-r--r--  drivers/spi/spi.c | 115
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/osdep_service.c | 4
-rw-r--r--  drivers/tee/optee/core.c | 4
-rw-r--r--  drivers/vfio/mdev/mdev_core.c | 16
-rw-r--r--  drivers/vfio/mdev/mdev_private.h | 5
-rw-r--r--  drivers/vfio/mdev/mdev_sysfs.c | 6
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 90
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 2
-rw-r--r--  drivers/vfio/pci/vfio_pci_private.h | 6
-rw-r--r--  drivers/vfio/platform/reset/Makefile | 2
-rw-r--r--  drivers/vfio/platform/reset/vfio_platform_amdxgbe.c | 2
-rw-r--r--  drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c | 2
-rw-r--r--  drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c | 2
-rw-r--r--  drivers/vfio/vfio.c | 8
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 3
-rw-r--r--  drivers/vhost/vhost.c | 2
316 files changed, 13650 insertions, 5633 deletions
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0ea2139c50d8..ccd296dbb95c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- hrtimer_cancel(&dev->power.suspend_timer);
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 2e8f0144f9ab..9cbb4b0cd01b 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -33,7 +33,7 @@ struct regcache_rbtree_node {
unsigned int blklen;
/* the actual rbtree node holding this block */
struct rb_node node;
-} __attribute__ ((packed));
+};
struct regcache_rbtree_ctx {
struct rb_root root;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 330c1f7e9665..5059748afd4c 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -35,6 +35,7 @@ struct regmap_irq_chip_data {
int wake_count;
void *status_reg_buf;
+ unsigned int *main_status_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
@@ -329,6 +330,33 @@ static const struct irq_chip regmap_irq_chip = {
.irq_set_wake = regmap_irq_set_wake,
};
+static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
+ unsigned int b)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ struct regmap_irq_sub_irq_map *subreg;
+ int i, ret = 0;
+
+ if (!chip->sub_reg_offsets) {
+ /* Assume linear mapping */
+ ret = regmap_read(map, chip->status_base +
+ (b * map->reg_stride * data->irq_reg_stride),
+ &data->status_buf[b]);
+ } else {
+ subreg = &chip->sub_reg_offsets[b];
+ for (i = 0; i < subreg->num_regs; i++) {
+ unsigned int offset = subreg->offset[i];
+
+ ret = regmap_read(map, chip->status_base + offset,
+ &data->status_buf[offset]);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
+}
+
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
struct regmap_irq_chip_data *data = d;
@@ -352,11 +380,65 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
/*
- * Read in the statuses, using a single bulk read if possible
- * in order to reduce the I/O overheads.
+ * Read only registers with active IRQs if the chip has 'main status
+ * register'. Else read in the statuses, using a single bulk read if
+ * possible in order to reduce the I/O overheads.
*/
- if (!map->use_single_read && map->reg_stride == 1 &&
- data->irq_reg_stride == 1) {
+
+ if (chip->num_main_regs) {
+ unsigned int max_main_bits;
+ unsigned long size;
+
+ size = chip->num_regs * sizeof(unsigned int);
+
+ max_main_bits = (chip->num_main_status_bits) ?
+ chip->num_main_status_bits : chip->num_regs;
+ /* Clear the status buf as we don't read all status regs */
+ memset(data->status_buf, 0, size);
+
+ /* We could support bulk read for main status registers
+ * but I don't expect to see devices with really many main
+ * status registers so let's only support single reads for the
+ * sake of simplicity. and add bulk reads only if needed
+ */
+ for (i = 0; i < chip->num_main_regs; i++) {
+ ret = regmap_read(map, chip->main_status +
+ (i * map->reg_stride
+ * data->irq_reg_stride),
+ &data->main_status_buf[i]);
+ if (ret) {
+ dev_err(map->dev,
+ "Failed to read IRQ status %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ /* Read sub registers with active IRQs */
+ for (i = 0; i < chip->num_main_regs; i++) {
+ unsigned int b;
+ const unsigned long mreg = data->main_status_buf[i];
+
+ for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
+ if (i * map->format.val_bytes * 8 + b >
+ max_main_bits)
+ break;
+ ret = read_sub_irq_data(data, b);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status %d\n",
+ ret);
+ if (chip->runtime_pm)
+ pm_runtime_put(map->dev);
+ goto exit;
+ }
+ }
+
+ }
+ } else if (!map->use_single_read && map->reg_stride == 1 &&
+ data->irq_reg_stride == 1) {
+
u8 *buf8 = data->status_reg_buf;
u16 *buf16 = data->status_reg_buf;
u32 *buf32 = data->status_reg_buf;
@@ -521,6 +603,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
+ if (chip->num_main_regs) {
+ d->main_status_buf = kcalloc(chip->num_main_regs,
+ sizeof(unsigned int),
+ GFP_KERNEL);
+
+ if (!d->main_status_buf)
+ goto err_alloc;
+ }
+
d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
GFP_KERNEL);
if (!d->status_buf)
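For reference, a minimal sketch of how a chip might describe a main status register to the code added above. The regmap_irq_chip fields are the ones read in regmap_irq_thread(), but the register offsets, counts, and the "example" name are invented for illustration:

	/* Hypothetical chip description, for illustration only. */
	static unsigned int example_sub0_offsets[] = { 0x0 };
	static unsigned int example_sub1_offsets[] = { 0x1, 0x2 };

	static struct regmap_irq_sub_irq_map example_sub_irq_map[] = {
		{ .num_regs = ARRAY_SIZE(example_sub0_offsets),
		  .offset = example_sub0_offsets },	/* main status bit 0 */
		{ .num_regs = ARRAY_SIZE(example_sub1_offsets),
		  .offset = example_sub1_offsets },	/* main status bit 1 */
	};

	static const struct regmap_irq_chip example_irq_chip = {
		.name = "example",
		.main_status = 0x00,		/* top-level status register */
		.num_main_regs = 1,
		.sub_reg_offsets = example_sub_irq_map,
		.status_base = 0x10,		/* sub status registers */
		.mask_base = 0x20,
		.num_regs = 3,
		/* .irqs, .num_irqs, etc. omitted */
	};

With such a description, only the sub status registers whose bit is set in the main status register are read on each interrupt.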
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 2fe225a697df..3487e03d4bc6 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9x5_systemck),
- nck(at91sam9x35_periphck), 0);
+ nck(at91sam9x5_systemck), 31, 0);
if (!at91sam9x5_pmc)
return;
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 2; i++) {
char name[6];
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index d69ad96fe988..cd0ef7274fdb 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index e358be7f6c8d..b645a9d59cdb 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3b97f60540ad..609970c0b666 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
0x060, BIT(10), 0);
static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
- 0x060, BIT(12), 0);
+ 0x060, BIT(11), 0);
static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
- 0x060, BIT(13), 0);
+ 0x060, BIT(12), 0);
static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
0x060, BIT(13), 0);
static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 621b1cd996db..ac12f261f8ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
[RST_BUS_VE] = { 0x2c4, BIT(0) },
- [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
[RST_BUS_CSI] = { 0x2c4, BIT(8) },
[RST_BUS_DE] = { 0x2c4, BIT(12) },
[RST_BUS_DBG] = { 0x2c4, BIT(31) },
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 242c3370544e..9ed46d188cb5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- kfree(priv);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ kfree(priv);
return 0;
}
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index f62624357020..907a6db4d6c0 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
return 0;
}
-static void cc_pm_go(struct cc_drvdata *drvdata) {}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 00e954f22bc9..74401e0adb29 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0
struct mtk_gc {
+ struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip mediatek_gpio_irq_chip = {
- .irq_unmask = mediatek_gpio_irq_unmask,
- .irq_mask = mediatek_gpio_irq_mask,
- .irq_mask_ack = mediatek_gpio_irq_mask,
- .irq_set_type = mediatek_gpio_irq_type,
-};
-
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
+ rg->irq_chip.name = dev_name(dev);
+ rg->irq_chip.parent_device = dev;
+ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
- mediatek_gpio_irq_chip.name = dev_name(dev);
for (i = 0; i < MTK_BANK_CNT; i++) {
ret = mediatek_gpio_bank_probe(dev, np, i);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index e9600b556f39..bcc6be4a5cb2 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
+ case MMP2_GPIO:
return false;
default:
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a6e1891217e2..c34eb9d9c59a 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -125,7 +125,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
for_each_child_of_node(np, child) {
ret = of_property_read_u32(child, "reg", &cs);
- if (!ret)
+ if (ret)
continue;
if (cs == index) {
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..5dc349173e4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index aadd0fa42e43..3aa42c64484a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -405,6 +405,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ u64 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7c108e687683..698bcb8ce61d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
+#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
+#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 6811a5d05b27..aa2f71cc1eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0b392bfca284..636d14a60952 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
+ /* Update to correct count(s) if racing with vblank irq */
+ amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* wake up userspace */
if (amdgpu_crtc->event) {
- /* Update to correct count(s) if racing with vblank irq */
- drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
/* page flip completed. clean up */
@@ -786,12 +785,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
@@ -4827,6 +4827,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0;
unsigned long flags;
+ u64 last_flip_vblank;
+ bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4858,6 +4860,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* In commit tail framework this cannot happen */
WARN_ON(1);
}
+
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr flips per
+ * video frame by use of throttling, but allow flip programming
+ * anywhere in the possibly large variable vrr vblank interval
+ * for fine-grained flip timing control and more opportunity to
+ * avoid stutter on late submission of amdgpu_dm_do_flip() calls.
+ */
+ last_flip_vblank = acrtc_attach->last_flip_vblank;
+
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
@@ -4881,10 +4893,18 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
drm_crtc_vblank_get(crtc);
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., clients
+ * using GLX_OML_sync_control extension.
+ */
+ if (!vrr_active)
+ last_flip_vblank = drm_crtc_vblank_count(crtc);
+
amdgpu_dm_do_flip(
crtc,
fb,
- (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ (uint32_t) last_flip_vblank + *wait_for_vblank,
dc_state);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 19801bdba0d2..7a72ee46f14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -662,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
@@ -671,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
- clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
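To make the effect of the workaround concrete, a worked example with invented numbers:

	/*
	 * Illustration only: if dfs_bypass_active is false and the context
	 * requests dispclk_khz = 400000, the value actually programmed is
	 * 400000 * 115 / 100 = 460000 kHz; with DFS bypass active the
	 * requested 400000 kHz is used unchanged.
	 */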
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index acd418515346..a6b80fdaa666 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dce100_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index a60a90e68d91..c4543178ba20 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
- dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index cdd1d6b7b9f2..4e9ea50141bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -790,9 +790,22 @@ bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 58a12ddf12f3..41883c981789 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2658,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
+ pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index f3dd66ae990a..aa35007262cd 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -154,6 +154,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (IS_ERR(dev))
return PTR_ERR(dev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_dev;
+
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 54e2ae614dcc..f4290f6b0c38 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1602,6 +1602,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
+ /*
+ * FIXME: Since prepare_fb and cleanup_fb are always called on
+ * the new_plane_state for async updates we need to block framebuffer
+ * changes. This prevents use of a fb that's been cleaned up and
+ * double cleanups from occuring.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 7f365ac0b549..4ee16b264dbe 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
+ conn_seq = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
- conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -371,7 +370,8 @@ retry:
if (conn_configured & BIT(i))
continue;
- if (conn_seq == 0 && !connector->has_tile)
+ /* First pass, only consider tiled connectors */
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -475,8 +475,10 @@ retry:
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
+ conn_seq = conn_configured;
goto retry;
+ }
/*
* If the BIOS didn't enable everything it could, fall back to have the
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e081f529..6a8fb6fd183c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index bb03079fbade..59279224e07f 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -602,6 +602,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
ret = dw_i2c_clk_cfg(master);
if (ret)
return ret;
+ /* fall through */
case I3C_BUS_MODE_PURE:
ret = dw_i3c_clk_cfg(master);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..d499cd61c0e8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
int err;
+ unsigned int factor;
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
return -EINVAL;
}
- rdev->qpmask = rdev->lldi.udb_density - 1;
- rdev->cqmask = rdev->lldi.ucq_density - 1;
+ /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
+ if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+ pr_err("%s: unsupported sge host page size %u\n",
+ pci_name(rdev->lldi.pdev),
+ rdev->lldi.sge_host_page_size);
+ return -EINVAL;
+ }
+
+ factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+ rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+ rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
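A worked example of the new mask scaling, using invented values:

	/*
	 * Example only: PAGE_SIZE = 65536 with sge_host_page_size = 4096
	 * gives factor = 16, so udb_density = 8 now yields
	 * qpmask = (8 * 16) - 1 = 127 rather than the previous 8 - 1 = 7.
	 */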
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31d91538bbf4..694324b37480 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- for (j = 0; j < target->req_ring_size; ++j) {
- struct srp_request *req = &ch->req_ring[j];
-
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
- }
- }
-
return SUCCESS;
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc9f14811e0f..58dc70bffd5b 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index d713271ebf7c..a64116586b4c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Clear ring flush state */
timeout = 1000; /* timeout of 1s */
- writel_relaxed(0x0, ring + RING_CONTROL);
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
do {
- if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
FLUSH_DONE_MASK))
break;
mdelay(1);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c6a7d4582dc6..38d9df3fb199 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
return ret;
}
+EXPORT_SYMBOL_GPL(mbox_flush);
/**
* mbox_request_channel - Request a mailbox channel.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index abba078f7f49..95ffe008ebdf 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,7 +8,7 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- slot-gpio.o
+ slot-gpio.o regulator.o
mmc_core-$(CONFIG_OF) += pwrseq.o
obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 14f3fdb8c6bb..2c71a434c915 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1124,7 +1124,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg;
+ unsigned int from, nr;
int err = 0, type = MMC_BLK_DISCARD;
blk_status_t status = BLK_STS_OK;
@@ -1136,24 +1136,18 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_discard(card))
- arg = MMC_DISCARD_ARG;
- else if (mmc_can_trim(card))
- arg = MMC_TRIM_ARG;
- else
- arg = MMC_ERASE_ARG;
do {
err = 0;
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
- arg == MMC_TRIM_ARG ?
+ card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
0);
}
if (!err)
- err = mmc_erase(card, from, nr, arg);
+ err = mmc_erase(card, from, nr, card->erase_arg);
} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (err)
status = BLK_STS_IOERR;
@@ -2380,12 +2374,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%u%s", card->host->index, subname ? subname : "");
- if (mmc_card_mmc(card))
- blk_queue_logical_block_size(md->queue.queue,
- card->ext_csd.data_sector_size);
- else
- blk_queue_logical_block_size(md->queue.queue, 512);
-
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
@@ -2774,8 +2762,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
- NULL, "%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
+ NULL, "%08llx\n");
/* That is two digits * 512 + 1 for newline */
#define EXT_CSD_STR_LEN 1025
@@ -2863,8 +2851,9 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
if (mmc_card_mmc(card) || mmc_card_sd(card)) {
md->status_dentry =
- debugfs_create_file("status", S_IRUSR, root, card,
- &mmc_dbg_card_status_fops);
+ debugfs_create_file_unsafe("status", 0400, root,
+ card,
+ &mmc_dbg_card_status_fops);
if (!md->status_dentry)
return -EIO;
}
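The per-request DISCARD/TRIM/ERASE selection removed above is replaced by the per-card card->erase_arg field. A hypothetical sketch of how that field could be chosen at card-init time, mirroring the removed logic (the actual assignment lives in mmc.c/sd.c, which are not shown here):

	/* Hypothetical card-init sketch, not taken from this patch. */
	if (mmc_can_discard(card))
		card->erase_arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		card->erase_arg = MMC_TRIM_ARG;
	else
		card->erase_arg = MMC_ERASE_ARG;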
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5bd58b95d318..6db36dc870b5 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -21,7 +21,6 @@
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
-#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
@@ -52,6 +51,7 @@
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
+#define SD_DISCARD_TIMEOUT_MS (250)
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
if (!data)
return;
- if (cmd->error || data->error ||
+ if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
@@ -758,33 +758,6 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_set_data_timeout);
-/**
- * mmc_align_data_size - pads a transfer size to a more optimal value
- * @card: the MMC card associated with the data transfer
- * @sz: original transfer size
- *
- * Pads the original data size with a number of extra bytes in
- * order to avoid controller bugs and/or performance hits
- * (e.g. some controllers revert to PIO for certain sizes).
- *
- * Returns the improved size, which might be unmodified.
- *
- * Note that this function is only relevant when issuing a
- * single scatter gather entry.
- */
-unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
-{
- /*
- * FIXME: We don't have a system for the controller to tell
- * the core about its problems yet, so for now we just 32-bit
- * align the size.
- */
- sz = ((sz + 3) / 4) * 4;
-
- return sz;
-}
-EXPORT_SYMBOL(mmc_align_data_size);
-
/*
* Allow claiming an already claimed host if the context is the same or there is
* no context but the task is the same.
@@ -1112,55 +1085,6 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
return mask;
}
-EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
-
-#ifdef CONFIG_OF
-
-/**
- * mmc_of_parse_voltage - return mask of supported voltages
- * @np: The device node need to be parsed.
- * @mask: mask of voltages available for MMC/SD/SDIO
- *
- * Parse the "voltage-ranges" DT property, returning zero if it is not
- * found, negative errno if the voltage-range specification is invalid,
- * or one if the voltage-range is specified and successfully parsed.
- */
-int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
-{
- const u32 *voltage_ranges;
- int num_ranges, i;
-
- voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
- num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges) {
- pr_debug("%pOF: voltage-ranges unspecified\n", np);
- return 0;
- }
- if (!num_ranges) {
- pr_err("%pOF: voltage-ranges empty\n", np);
- return -EINVAL;
- }
-
- for (i = 0; i < num_ranges; i++) {
- const int j = i * 2;
- u32 ocr_mask;
-
- ocr_mask = mmc_vddrange_to_ocrmask(
- be32_to_cpu(voltage_ranges[j]),
- be32_to_cpu(voltage_ranges[j + 1]));
- if (!ocr_mask) {
- pr_err("%pOF: voltage-range #%d is invalid\n",
- np, i);
- return -EINVAL;
- }
- *mask |= ocr_mask;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(mmc_of_parse_voltage);
-
-#endif /* CONFIG_OF */
static int mmc_of_get_func_num(struct device_node *node)
{
@@ -1190,246 +1114,6 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
return NULL;
}
-#ifdef CONFIG_REGULATOR
-
-/**
- * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
- * @vdd_bit: OCR bit number
- * @min_uV: minimum voltage value (mV)
- * @max_uV: maximum voltage value (mV)
- *
- * This function returns the voltage range according to the provided OCR
- * bit number. If conversion is not possible a negative errno value returned.
- */
-static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
-{
- int tmp;
-
- if (!vdd_bit)
- return -EINVAL;
-
- /*
- * REVISIT mmc_vddrange_to_ocrmask() may have set some
- * bits this regulator doesn't quite support ... don't
- * be too picky, most cards and regulators are OK with
- * a 0.1V range goof (it's a small error percentage).
- */
- tmp = vdd_bit - ilog2(MMC_VDD_165_195);
- if (tmp == 0) {
- *min_uV = 1650 * 1000;
- *max_uV = 1950 * 1000;
- } else {
- *min_uV = 1900 * 1000 + tmp * 100 * 1000;
- *max_uV = *min_uV + 100 * 1000;
- }
-
- return 0;
-}
-
-/**
- * mmc_regulator_get_ocrmask - return mask of supported voltages
- * @supply: regulator to use
- *
- * This returns either a negative errno, or a mask of voltages that
- * can be provided to MMC/SD/SDIO devices using the specified voltage
- * regulator. This would normally be called before registering the
- * MMC host adapter.
- */
-int mmc_regulator_get_ocrmask(struct regulator *supply)
-{
- int result = 0;
- int count;
- int i;
- int vdd_uV;
- int vdd_mV;
-
- count = regulator_count_voltages(supply);
- if (count < 0)
- return count;
-
- for (i = 0; i < count; i++) {
- vdd_uV = regulator_list_voltage(supply, i);
- if (vdd_uV <= 0)
- continue;
-
- vdd_mV = vdd_uV / 1000;
- result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
- }
-
- if (!result) {
- vdd_uV = regulator_get_voltage(supply);
- if (vdd_uV <= 0)
- return vdd_uV;
-
- vdd_mV = vdd_uV / 1000;
- result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
- }
-
- return result;
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
-
-/**
- * mmc_regulator_set_ocr - set regulator to match host->ios voltage
- * @mmc: the host to regulate
- * @supply: regulator to use
- * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
- *
- * Returns zero on success, else negative errno.
- *
- * MMC host drivers may use this to enable or disable a regulator using
- * a particular supply voltage. This would normally be called from the
- * set_ios() method.
- */
-int mmc_regulator_set_ocr(struct mmc_host *mmc,
- struct regulator *supply,
- unsigned short vdd_bit)
-{
- int result = 0;
- int min_uV, max_uV;
-
- if (vdd_bit) {
- mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
-
- result = regulator_set_voltage(supply, min_uV, max_uV);
- if (result == 0 && !mmc->regulator_enabled) {
- result = regulator_enable(supply);
- if (!result)
- mmc->regulator_enabled = true;
- }
- } else if (mmc->regulator_enabled) {
- result = regulator_disable(supply);
- if (result == 0)
- mmc->regulator_enabled = false;
- }
-
- if (result)
- dev_err(mmc_dev(mmc),
- "could not set regulator OCR (%d)\n", result);
- return result;
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
-
-static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
- int min_uV, int target_uV,
- int max_uV)
-{
- /*
- * Check if supported first to avoid errors since we may try several
- * signal levels during power up and don't want to show errors.
- */
- if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
- return -EINVAL;
-
- return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
- max_uV);
-}
-
-/**
- * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
- *
- * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
- * That will match the behavior of old boards where VQMMC and VMMC were supplied
- * by the same supply. The Bus Operating conditions for 3.3V signaling in the
- * SD card spec also define VQMMC in terms of VMMC.
- * If this is not possible we'll try the full 2.7-3.6V of the spec.
- *
- * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
- * requested voltage. This is definitely a good idea for UHS where there's a
- * separate regulator on the card that's trying to make 1.8V and it's best if
- * we match.
- *
- * This function is expected to be used by a controller's
- * start_signal_voltage_switch() function.
- */
-int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct device *dev = mmc_dev(mmc);
- int ret, volt, min_uV, max_uV;
-
- /* If no vqmmc supply then we can't change the voltage */
- if (IS_ERR(mmc->supply.vqmmc))
- return -EINVAL;
-
- switch (ios->signal_voltage) {
- case MMC_SIGNAL_VOLTAGE_120:
- return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- 1100000, 1200000, 1300000);
- case MMC_SIGNAL_VOLTAGE_180:
- return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- 1700000, 1800000, 1950000);
- case MMC_SIGNAL_VOLTAGE_330:
- ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
- if (ret < 0)
- return ret;
-
- dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
- __func__, volt, max_uV);
-
- min_uV = max(volt - 300000, 2700000);
- max_uV = min(max_uV + 200000, 3600000);
-
- /*
- * Due to a limitation in the current implementation of
- * regulator_set_voltage_triplet() which is taking the lowest
- * voltage possible if below the target, search for a suitable
- * voltage in two steps and try to stay close to vmmc
- * with a 0.3V tolerance at first.
- */
- if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- min_uV, volt, max_uV))
- return 0;
-
- return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
- 2700000, volt, 3600000);
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
-
-#endif /* CONFIG_REGULATOR */
-
-/**
- * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
- * @mmc: the host to regulate
- *
- * Returns 0 or errno. errno should be handled, it is either a critical error
- * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
- * regulators have been found because they all are optional. If you require
- * certain regulators, you need to check separately in your driver if they got
- * populated after calling this function.
- */
-int mmc_regulator_get_supply(struct mmc_host *mmc)
-{
- struct device *dev = mmc_dev(mmc);
- int ret;
-
- mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
- mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
-
- if (IS_ERR(mmc->supply.vmmc)) {
- if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_dbg(dev, "No vmmc regulator found\n");
- } else {
- ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
- if (ret > 0)
- mmc->ocr_avail = ret;
- else
- dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
- }
-
- if (IS_ERR(mmc->supply.vqmmc)) {
- if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_dbg(dev, "No vqmmc regulator found\n");
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
-
/*
* Mask off any voltages we don't support and select
* the lowest voltage
@@ -1936,6 +1620,12 @@ static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
{
unsigned int erase_timeout;
+ /* For DISCARD none of the calculation below applies;
+ * the busy timeout is 250 ms per discard command.
+ */
+ if (arg == SD_DISCARD_ARG)
+ return SD_DISCARD_TIMEOUT_MS;
+
if (card->ssr.erase_timeout) {
/* Erase timeout specified in SD Status Register (SSR) */
erase_timeout = card->ssr.erase_timeout * qty +
@@ -2164,7 +1854,7 @@ static unsigned int mmc_align_erase_size(struct mmc_card *card,
* @card: card to erase
* @from: first sector to erase
* @nr: number of sectors to erase
- * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ * @arg: erase command argument
*
* Caller must claim host before calling this function.
*/
@@ -2181,14 +1871,14 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
if (!card->erase_size)
return -EOPNOTSUPP;
- if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
+ if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
return -EOPNOTSUPP;
- if ((arg & MMC_SECURE_ARGS) &&
+ if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
- if ((arg & MMC_TRIM_ARGS) &&
+ if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
@@ -2381,9 +2071,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
return card->pref_erase;
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
- if (max_discard && mmc_can_trim(card)) {
+ if (mmc_can_trim(card)) {
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
- if (max_trim < max_discard)
+ if (max_trim < max_discard || max_discard == 0)
max_discard = max_trim;
} else if (max_discard < card->erase_size) {
max_discard = 0;
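
A hedged caller-side sketch (not part of the patch) of how the core.c changes above are meant to be used: an SD card can now be issued a DISCARD via the per-card erase argument selected at init time, with a fixed 250 ms busy timeout budgeted per command. The helper name below is made up for illustration.

/* Illustrative only: use the erase argument chosen for this card
 * (SD_DISCARD_ARG when the card advertises DISCARD support,
 * SD_ERASE_ARG otherwise). */
static int example_sd_trim_range(struct mmc_card *card,
				 unsigned int from, unsigned int nr)
{
	return mmc_erase(card, from, nr, card->erase_arg);
}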
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 8fb6bc37f808..b5083b13d594 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -59,6 +59,7 @@ void mmc_power_up(struct mmc_host *host, u32 ocr);
void mmc_power_off(struct mmc_host *host);
void mmc_power_cycle(struct mmc_host *host, u32 ocr);
void mmc_set_initial_state(struct mmc_host *host);
+u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
static inline void mmc_delay(unsigned int ms)
{
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index cf58ccaf22d5..3a4402a79904 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -194,7 +194,7 @@ int mmc_of_parse(struct mmc_host *host)
switch (bus_width) {
case 8:
host->caps |= MMC_CAP_8_BIT_DATA;
- /* Hosts capable of 8-bit transfers can also do 4 bits */
+ /* fall through - Hosts capable of 8-bit can also do 4 bits */
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
@@ -260,7 +260,7 @@ int mmc_of_parse(struct mmc_host *host)
/* Parse Write Protection */
ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
- ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
@@ -349,6 +349,50 @@ int mmc_of_parse(struct mmc_host *host)
EXPORT_SYMBOL(mmc_of_parse);
/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * Parse the "voltage-ranges" DT property, returning zero if it is not
+ * found, negative errno if the voltage-range specification is invalid,
+ * or one if the voltage-range is specified and successfully parsed.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!voltage_ranges) {
+ pr_debug("%pOF: voltage-ranges unspecified\n", np);
+ return 0;
+ }
+ if (!num_ranges) {
+ pr_err("%pOF: voltage-ranges empty\n", np);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%pOF: voltage-range #%d is invalid\n",
+ np, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
* @dev: pointer to host device model structure
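
A hedged usage sketch (not part of the patch) of the newly exported mmc_of_parse_voltage(): a host driver passes its device node and an OCR mask to fill in; everything except the helper itself is illustrative.

/* Illustrative only: translate an optional "voltage-ranges" property
 * into mmc->ocr_avail, keeping the driver default when it is absent. */
static int example_parse_voltages(struct mmc_host *mmc,
				  struct device_node *np)
{
	u32 ocr_mask = 0;
	int ret;

	ret = mmc_of_parse_voltage(np, &ocr_mask);
	if (ret < 0)		/* property present but malformed */
		return ret;
	if (ret == 0)		/* property absent: keep the default */
		return 0;

	mmc->ocr_avail = ocr_mask;
	return 0;
}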
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index da892a599524..3e786ba204c3 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1594,6 +1594,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (oldcard) {
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
err = -ENOENT;
goto err;
}
@@ -1743,6 +1745,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
}
+ /* set erase_arg */
+ if (mmc_can_discard(card))
+ card->erase_arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
+ card->erase_arg = MMC_TRIM_ARG;
+ else
+ card->erase_arg = MMC_ERASE_ARG;
+
/*
* Select timing interface
*/
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 9054329fe903..c5208fb312ae 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -562,7 +562,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
- err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto out;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 35cc138b096d..15a45ec6518d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
+ unsigned block_size = 512;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ if (mmc_card_mmc(card))
+ block_size = card->ext_csd.data_sector_size;
+
+ blk_queue_logical_block_size(mq->queue, block_size);
+ blk_queue_max_segment_size(mq->queue,
+ round_down(host->max_seg_size, block_size));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
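
A small worked example (assumed numbers, not from the patch) of why the queue.c hunk above rounds the segment size down to the logical block size: with a 4 KiB-native eMMC and an odd host limit, each segment must still hold a whole number of logical blocks.

/* Sketch with assumed values. */
static void example_queue_limits(void)
{
	unsigned int block_size = 4096;		/* ext_csd.data_sector_size */
	unsigned int max_seg_size = 65535;	/* hypothetical host limit */

	/* round_down(65535, 4096) == 61440, a multiple of the block size */
	unsigned int seg = round_down(max_seg_size, block_size);

	(void)seg;
}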
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
new file mode 100644
index 000000000000..b6febbcf8978
--- /dev/null
+++ b/drivers/mmc/core/regulator.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions for MMC regulators.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mmc/host.h>
+
+#include "core.h"
+#include "host.h"
+
+#ifdef CONFIG_REGULATOR
+
+/**
+ * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
+ * @vdd_bit: OCR bit number
+ * @min_uV: minimum voltage value (uV)
+ * @max_uV: maximum voltage value (uV)
+ *
+ * This function returns the voltage range according to the provided OCR
+ * bit number. If conversion is not possible, a negative errno value is returned.
+ */
+static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
+{
+ int tmp;
+
+ if (!vdd_bit)
+ return -EINVAL;
+
+ /*
+ * REVISIT mmc_vddrange_to_ocrmask() may have set some
+ * bits this regulator doesn't quite support ... don't
+ * be too picky, most cards and regulators are OK with
+ * a 0.1V range goof (it's a small error percentage).
+ */
+ tmp = vdd_bit - ilog2(MMC_VDD_165_195);
+ if (tmp == 0) {
+ *min_uV = 1650 * 1000;
+ *max_uV = 1950 * 1000;
+ } else {
+ *min_uV = 1900 * 1000 + tmp * 100 * 1000;
+ *max_uV = *min_uV + 100 * 1000;
+ }
+
+ return 0;
+}
+
+/**
+ * mmc_regulator_get_ocrmask - return mask of supported voltages
+ * @supply: regulator to use
+ *
+ * This returns either a negative errno, or a mask of voltages that
+ * can be provided to MMC/SD/SDIO devices using the specified voltage
+ * regulator. This would normally be called before registering the
+ * MMC host adapter.
+ */
+static int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ int result = 0;
+ int count;
+ int i;
+ int vdd_uV;
+ int vdd_mV;
+
+ count = regulator_count_voltages(supply);
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ vdd_uV = regulator_list_voltage(supply, i);
+ if (vdd_uV <= 0)
+ continue;
+
+ vdd_mV = vdd_uV / 1000;
+ result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ if (!result) {
+ vdd_uV = regulator_get_voltage(supply);
+ if (vdd_uV <= 0)
+ return vdd_uV;
+
+ vdd_mV = vdd_uV / 1000;
+ result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
+ return result;
+}
+
+/**
+ * mmc_regulator_set_ocr - set regulator to match host->ios voltage
+ * @mmc: the host to regulate
+ * @supply: regulator to use
+ * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * MMC host drivers may use this to enable or disable a regulator using
+ * a particular supply voltage. This would normally be called from the
+ * set_ios() method.
+ */
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ int result = 0;
+ int min_uV, max_uV;
+
+ if (vdd_bit) {
+ mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
+
+ result = regulator_set_voltage(supply, min_uV, max_uV);
+ if (result == 0 && !mmc->regulator_enabled) {
+ result = regulator_enable(supply);
+ if (!result)
+ mmc->regulator_enabled = true;
+ }
+ } else if (mmc->regulator_enabled) {
+ result = regulator_disable(supply);
+ if (result == 0)
+ mmc->regulator_enabled = false;
+ }
+
+ if (result)
+ dev_err(mmc_dev(mmc),
+ "could not set regulator OCR (%d)\n", result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+
+static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
+ int min_uV, int target_uV,
+ int max_uV)
+{
+ /*
+ * Check if supported first to avoid errors since we may try several
+ * signal levels during power up and don't want to show errors.
+ */
+ if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
+ return -EINVAL;
+
+ return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
+ max_uV);
+}
+
+/**
+ * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
+ *
+ * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
+ * That will match the behavior of old boards where VQMMC and VMMC were supplied
+ * by the same supply. The Bus Operating conditions for 3.3V signaling in the
+ * SD card spec also define VQMMC in terms of VMMC.
+ * If this is not possible we'll try the full 2.7-3.6V of the spec.
+ *
+ * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
+ * requested voltage. This is definitely a good idea for UHS where there's a
+ * separate regulator on the card that's trying to make 1.8V and it's best if
+ * we match.
+ *
+ * This function is expected to be used by a controller's
+ * start_signal_voltage_switch() function.
+ */
+int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret, volt, min_uV, max_uV;
+
+ /* If no vqmmc supply then we can't change the voltage */
+ if (IS_ERR(mmc->supply.vqmmc))
+ return -EINVAL;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_120:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1100000, 1200000, 1300000);
+ case MMC_SIGNAL_VOLTAGE_180:
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 1700000, 1800000, 1950000);
+ case MMC_SIGNAL_VOLTAGE_330:
+ ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
+ __func__, volt, max_uV);
+
+ min_uV = max(volt - 300000, 2700000);
+ max_uV = min(max_uV + 200000, 3600000);
+
+ /*
+ * Due to a limitation in the current implementation of
+ * regulator_set_voltage_triplet(), which takes the lowest
+ * voltage possible if below the target, search for a suitable
+ * voltage in two steps and try to stay close to vmmc
+ * with a 0.3V tolerance at first.
+ */
+ if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ min_uV, volt, max_uV))
+ return 0;
+
+ return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
+ 2700000, volt, 3600000);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
+
+#else
+
+static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ return 0;
+}
+
+#endif /* CONFIG_REGULATOR */
+
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. errno should be handled, it is either a critical error
+ * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
+ * regulators have been found because they all are optional. If you require
+ * certain regulators, you need to check separately in your driver if they got
+ * populated after calling this function.
+ */
+int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ int ret;
+
+ mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
+ mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
+
+ if (IS_ERR(mmc->supply.vmmc)) {
+ if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vmmc regulator found\n");
+ } else {
+ ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
+ }
+
+ if (IS_ERR(mmc->supply.vqmmc)) {
+ if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(dev, "No vqmmc regulator found\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
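
A hedged sketch (not part of the patch) of how a host driver consumes the helpers that now live in regulator.c; the function names and the fallback OCR mask below are illustrative.

/* Illustrative only: grab the optional vmmc/vqmmc supplies at probe time. */
static int example_host_probe(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)			/* -EPROBE_DEFER or a real error */
		return ret;

	if (!mmc->ocr_avail)		/* both regulators are optional */
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	return 0;
}

/* Illustrative only: the voltage switch callback simply forwards to the
 * core helper, as its kernel-doc above suggests. */
static int example_start_signal_voltage_switch(struct mmc_host *mmc,
					       struct mmc_ios *ios)
{
	return mmc_regulator_set_vqmmc(mmc, ios);
}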
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d0d9f90e7cdf..265e1aeeb9d8 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -209,6 +209,11 @@ static int mmc_decode_scr(struct mmc_card *card)
/* Check if Physical Layer Spec v3.0 is supported */
scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
+ if (scr->sda_spec3) {
+ scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1);
+ scr->sda_specx = UNSTUFF_BITS(resp, 38, 4);
+ }
+
if (UNSTUFF_BITS(resp, 55, 1))
card->erased_byte = 0xFF;
else
@@ -226,6 +231,8 @@ static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
__be32 *raw_ssr;
+ u32 resp[4] = {};
+ u8 discard_support;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -271,6 +278,14 @@ static int mmc_read_ssr(struct mmc_card *card)
}
}
+ /*
+ * Starting with SD 5.1, DISCARD is supported if DISCARD_SUPPORT (b313) is set.
+ */
+ resp[3] = card->raw_ssr[6];
+ discard_support = UNSTUFF_BITS(resp, 313 - 288, 1);
+ card->erase_arg = (card->scr.sda_specx && discard_support) ?
+ SD_DISCARD_ARG : SD_ERASE_ARG;
+
return 0;
}
@@ -936,8 +951,11 @@ retry:
return err;
if (oldcard) {
- if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0)
+ if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
+ }
card = oldcard;
} else {
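
For clarity, restating the bit arithmetic in the SSR hunk above: the SD Status register is 512 bits wide and card->raw_ssr[6] holds bits 288-319, so copying that word into resp[3] lets UNSTUFF_BITS() read bit 313 - 288 = 25 of that word, which is where DISCARD_SUPPORT is defined from SD 5.1 onwards.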
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 47056d8d1bac..0bb0b8419016 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -52,36 +52,17 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
}
EXPORT_SYMBOL_GPL(mmc_app_cmd);
-/**
- * mmc_wait_for_app_cmd - start an application command and wait for
- completion
- * @host: MMC host to start command
- * @card: Card to send MMC_APP_CMD to
- * @cmd: MMC command to start
- * @retries: maximum number of retries
- *
- * Sends a MMC_APP_CMD, checks the card response, sends the command
- * in the parameter and waits for it to complete. Return any error
- * that occurred while the command was executing. Do not attempt to
- * parse the response.
- */
-int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
- struct mmc_command *cmd, int retries)
+static int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
+ struct mmc_command *cmd)
{
struct mmc_request mrq = {};
-
- int i, err;
-
- if (retries < 0)
- retries = MMC_CMD_RETRIES;
-
- err = -EIO;
+ int i, err = -EIO;
/*
* We have to resend MMC_APP_CMD for each attempt so
* we cannot use the retries field in mmc_command.
*/
- for (i = 0;i <= retries;i++) {
+ for (i = 0; i <= MMC_CMD_RETRIES; i++) {
err = mmc_app_cmd(host, card);
if (err) {
/* no point in retrying; no APP commands allowed */
@@ -116,8 +97,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
return err;
}
-EXPORT_SYMBOL(mmc_wait_for_app_cmd);
-
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
struct mmc_command cmd = {};
@@ -136,7 +115,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
return -EINVAL;
}
- return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
+ return mmc_wait_for_app_cmd(card->host, card, &cmd);
}
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
@@ -152,7 +131,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
for (i = 100; i; i--) {
- err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_app_cmd(host, NULL, &cmd);
if (err)
break;
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 0e6c3d51e66d..ffaed5cacc88 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -16,7 +16,6 @@
struct mmc_card;
struct mmc_host;
-struct mmc_command;
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
@@ -27,8 +26,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
-int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
- struct mmc_command *cmd, int retries);
#endif
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index d8e17ea6126d..6718fc8bb40f 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -617,6 +617,8 @@ try_again:
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
mmc_remove_card(card);
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
}
} else {
@@ -624,6 +626,8 @@ try_again:
if (oldcard && oldcard->type != MMC_TYPE_SDIO) {
mmc_remove_card(card);
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
}
}
@@ -736,8 +740,11 @@ try_again:
int same = (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device);
mmc_remove_card(card);
- if (!same)
+ if (!same) {
+ pr_debug("%s: Perhaps the card was replaced\n",
+ mmc_hostname(host));
return -ENOENT;
+ }
card = oldcard;
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index b6d8203e46eb..62b0f5ecc7f7 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -179,7 +179,6 @@ static int sdio_bus_remove(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
- int ret = 0;
/* Make sure card is powered before invoking ->remove() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
@@ -205,7 +204,7 @@ static int sdio_bus_remove(struct device *dev)
dev_pm_domain_detach(dev, false);
- return ret;
+ return 0;
}
static const struct dev_pm_ops sdio_bus_pm_ops = {
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index d40744bbafa9..3f67fbbe0d75 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -10,6 +10,7 @@
*/
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
@@ -203,6 +204,21 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
return min(mval, 512u); /* maximum size for byte mode */
}
+/*
+ * This is legacy code, which needs to be reworked some day. Basically we need
+ * to take into account the properties of the host, so as to enable the SDIO
+ * func driver layer to allocate optimal buffers.
+ */
+static inline unsigned int _sdio_align_size(unsigned int sz)
+{
+ /*
+ * FIXME: We don't have a system for the controller to tell
+ * the core about its problems yet, so for now we just 32-bit
+ * align the size.
+ */
+ return ALIGN(sz, 4);
+}
+
/**
* sdio_align_size - pads a transfer size to a more optimal value
* @func: SDIO function
@@ -230,7 +246,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* wants to increase the size up to a point where it
* might need more than one block.
*/
- sz = mmc_align_data_size(func->card, sz);
+ sz = _sdio_align_size(sz);
/*
* If we can still do this with just a byte transfer, then
@@ -252,7 +268,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
*/
blk_sz = ((sz + func->cur_blksize - 1) /
func->cur_blksize) * func->cur_blksize;
- blk_sz = mmc_align_data_size(func->card, blk_sz);
+ blk_sz = _sdio_align_size(blk_sz);
/*
* This value is only good if it is still just
@@ -265,8 +281,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* We failed to do one request, but at least try to
* pad the remainder properly.
*/
- byte_sz = mmc_align_data_size(func->card,
- sz % func->cur_blksize);
+ byte_sz = _sdio_align_size(sz % func->cur_blksize);
if (byte_sz <= sdio_max_byte_size(func)) {
blk_sz = sz / func->cur_blksize;
return blk_sz * func->cur_blksize + byte_sz;
@@ -276,16 +291,14 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
* We need multiple requests, so first check that the
* controller can handle the chunk size;
*/
- chunk_sz = mmc_align_data_size(func->card,
- sdio_max_byte_size(func));
+ chunk_sz = _sdio_align_size(sdio_max_byte_size(func));
if (chunk_sz == sdio_max_byte_size(func)) {
/*
* Fix up the size of the remainder (if any)
*/
byte_sz = orig_sz % chunk_sz;
if (byte_sz) {
- byte_sz = mmc_align_data_size(func->card,
- byte_sz);
+ byte_sz = _sdio_align_size(byte_sz);
}
return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
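
A hedged usage sketch (not part of the patch): after the change above, the host-specific mmc_align_data_size() hook is replaced by a fixed 32-bit alignment inside sdio_align_size(), so an SDIO function driver can size its buffers as below; the allocation helper is illustrative.

/* Illustrative only: allocate a receive buffer padded to a
 * transfer-friendly size. */
static void *example_alloc_rx_buf(struct sdio_func *func, unsigned int len,
				  unsigned int *padded_len)
{
	*padded_len = sdio_align_size(func, len);
	return kzalloc(*padded_len, GFP_KERNEL);
}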
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index 96945cafbf0b..1f6d0447ea0f 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -25,7 +25,6 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
-unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
void sdio_irq_work(struct work_struct *work);
static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 319ccd93383d..4afc6b87b465 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -22,7 +22,6 @@
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- bool override_ro_active_level;
bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
@@ -71,10 +70,6 @@ int mmc_gpio_get_ro(struct mmc_host *host)
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- if (ctx->override_ro_active_level)
- return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^
- !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
-
return gpiod_get_value_cansleep(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
@@ -225,7 +220,6 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* @host: mmc host
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
- * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
* @gpio_invert: will return whether the GPIO line is inverted or not,
* set to NULL to ignore
@@ -233,7 +227,7 @@ EXPORT_SYMBOL(mmc_can_gpio_cd);
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx, bool override_active_level,
+ unsigned int idx,
unsigned int debounce, bool *gpio_invert)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
@@ -253,7 +247,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
if (gpio_invert)
*gpio_invert = !gpiod_is_active_low(desc);
- ctx->override_ro_active_level = override_active_level;
ctx->ro_gpio = desc;
return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index a44ec8bb5418..28fcd8f580a1 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -224,6 +224,7 @@ config MMC_SDHCI_ESDHC_IMX
depends on ARCH_MXC
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Freescale eSDHC/uSDHC controller support
found on i.MX25, i.MX35 i.MX5x and i.MX6x.
@@ -250,6 +251,7 @@ config MMC_SDHCI_TEGRA
depends on ARCH_TEGRA
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Tegra SD/MMC controller. If you have a Tegra
platform with SD or MMC devices, say Y or M here.
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 47189f9ed4e2..735aa5871358 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1410,6 +1410,9 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
+ break;
}
if (ios->clock) {
@@ -2275,8 +2278,11 @@ static int atmci_init_slot(struct atmel_mci *host,
* use only one bit for data to prevent fifo underruns and overruns
* which will corrupt data.
*/
- if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
+ if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) {
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (slot_data->bus_width >= 8)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
+ }
if (atmci_get_version(host) < 0x200) {
mmc->max_segs = 256;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index c9e7aa50bb0a..7e0d3a49c06d 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -148,7 +148,6 @@ struct bcm2835_host {
void __iomem *ioaddr;
u32 phys_addr;
- struct mmc_host *mmc;
struct platform_device *pdev;
int clock; /* Current clock speed */
@@ -618,7 +617,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
"failed to terminate DMA (%d)\n", err);
}
- mmc_request_done(host->mmc, mrq);
+ mmc_request_done(mmc_from_priv(host), mrq);
}
static
@@ -837,7 +836,7 @@ static void bcm2835_timeout(struct work_struct *work)
dev_err(dev, "timeout waiting for hardware interrupt.\n");
bcm2835_dumpregs(host);
- bcm2835_reset(host->mmc);
+ bcm2835_reset(mmc_from_priv(host));
if (host->data) {
host->data->error = -ETIMEDOUT;
@@ -1100,6 +1099,7 @@ static void bcm2835_dma_complete_work(struct work_struct *work)
static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
int div;
/* The SDCDIV register has 11 bits, and holds (div - 2). But
@@ -1143,18 +1143,18 @@ static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
div = SDCDIV_MAX_CDIV;
clock = host->max_clk / (div + 2);
- host->mmc->actual_clock = clock;
+ mmc->actual_clock = clock;
/* Calibrate some delays */
host->ns_per_fifo_word = (1000000000 / clock) *
- ((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
+ ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
host->cdiv = div;
writel(host->cdiv, host->ioaddr + SDCDIV);
/* Set the timeout to 500ms */
- writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT);
+ writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT);
}
static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -1264,7 +1264,7 @@ static const struct mmc_host_ops bcm2835_ops = {
static int bcm2835_add_host(struct bcm2835_host *host)
{
- struct mmc_host *mmc = host->mmc;
+ struct mmc_host *mmc = mmc_from_priv(host);
struct device *dev = &host->pdev->dev;
char pio_limit_string[20];
int ret;
@@ -1286,7 +1286,7 @@ static int bcm2835_add_host(struct bcm2835_host *host)
spin_lock_init(&host->lock);
mutex_init(&host->mutex);
- if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) {
+ if (!host->dma_chan_rxtx) {
dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
host->use_dma = false;
} else {
@@ -1370,7 +1370,6 @@ static int bcm2835_probe(struct platform_device *pdev)
mmc->ops = &bcm2835_ops;
host = mmc_priv(mmc);
- host->mmc = mmc;
host->pdev = pdev;
spin_lock_init(&host->lock);
@@ -1441,8 +1440,9 @@ err:
static int bcm2835_remove(struct platform_device *pdev)
{
struct bcm2835_host *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = mmc_from_priv(host);
- mmc_remove_host(host->mmc);
+ mmc_remove_host(mmc);
writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
@@ -1454,8 +1454,7 @@ static int bcm2835_remove(struct platform_device *pdev)
if (host->dma_chan_rxtx)
dma_release_channel(host->dma_chan_rxtx);
- mmc_free_host(host->mmc);
- platform_set_drvdata(pdev, NULL);
+ mmc_free_host(mmc);
return 0;
}
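
A hedged note on the bcm2835 conversion above: the duplicated mmc_host pointer is dropped in favour of mapping the private data back to its mmc_host. A minimal sketch of what such a helper amounts to, assuming mmc_from_priv() is the counterpart of mmc_priv() (see include/linux/mmc/host.h):

/* Sketch only; the real helper lives in the mmc core headers. */
static inline struct mmc_host *example_mmc_from_priv(void *priv)
{
	return container_of(priv, struct mmc_host, private);
}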
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 1087b4c79cd6..4c477dcd2ada 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -566,30 +566,32 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
cb710_mmc_select_clock_divider(mmc, ios->clock);
- if (ios->power_mode != reader->last_power_mode)
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- err = cb710_mmc_powerup(slot);
- if (err) {
- dev_warn(cb710_slot_dev(slot),
- "powerup failed (%d)- retrying\n", err);
- cb710_mmc_powerdown(slot);
- udelay(1);
+ if (ios->power_mode != reader->last_power_mode) {
+ switch (ios->power_mode) {
+ case MMC_POWER_ON:
err = cb710_mmc_powerup(slot);
- if (err)
+ if (err) {
dev_warn(cb710_slot_dev(slot),
- "powerup retry failed (%d) - expect errors\n",
+ "powerup failed (%d)- retrying\n", err);
+ cb710_mmc_powerdown(slot);
+ udelay(1);
+ err = cb710_mmc_powerup(slot);
+ if (err)
+ dev_warn(cb710_slot_dev(slot),
+ "powerup retry failed (%d) - expect errors\n",
err);
+ }
+ reader->last_power_mode = MMC_POWER_ON;
+ break;
+ case MMC_POWER_OFF:
+ cb710_mmc_powerdown(slot);
+ reader->last_power_mode = MMC_POWER_OFF;
+ break;
+ case MMC_POWER_UP:
+ default:
+ /* ignore */
+ break;
}
- reader->last_power_mode = MMC_POWER_ON;
- break;
- case MMC_POWER_OFF:
- cb710_mmc_powerdown(slot);
- reader->last_power_mode = MMC_POWER_OFF;
- break;
- case MMC_POWER_UP:
- default:
- /* ignore */;
}
cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1);
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e947cf..a8af682a9182 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- (cq_host->num_slots - 1);
+ cq_host->mmc->cqe_qdepth;
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size,
&cq_host->desc_dma_base,
GFP_KERNEL);
+ if (!cq_host->desc_base)
+ return -ENOMEM;
+
cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
cq_host->data_size,
&cq_host->trans_desc_dma_base,
GFP_KERNEL);
- if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ if (!cq_host->trans_desc_base) {
+ dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+ cq_host->desc_base = NULL;
+ cq_host->desc_dma_base = 0;
return -ENOMEM;
+ }
pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 9e68c3645e22..49e0daf2ef5e 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1193,7 +1193,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 33215d66afa2..63303022669c 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -21,7 +21,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -36,7 +35,6 @@
#include <asm/cacheflush.h>
#include <asm/mach-jz4740/dma.h>
-#include <asm/mach-jz4740/jz4740_mmc.h>
#define JZ_REG_MMC_STRPCL 0x00
#define JZ_REG_MMC_STATUS 0x04
@@ -148,9 +146,7 @@ enum jz4780_cookie {
struct jz4740_mmc_host {
struct mmc_host *mmc;
struct platform_device *pdev;
- struct jz4740_mmc_platform_data *pdata;
struct clk *clk;
- struct gpio_desc *power;
enum jz4740_mmc_version version;
@@ -743,6 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
jz_mmc_prepare_data_transfer(host);
+ /* fall through */
case JZ4740_MMC_STATE_TRANSFER_DATA:
if (host->use_dma) {
@@ -777,6 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
break;
}
jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
+ /* fall through */
case JZ4740_MMC_STATE_SEND_STOP:
if (!req->stop)
@@ -894,16 +892,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_UP:
jz4740_mmc_reset(host);
- if (host->power)
- gpiod_set_value(host->power, 1);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
host->cmdat |= JZ_MMC_CMDAT_INIT;
clk_prepare_enable(host->clk);
break;
case MMC_POWER_ON:
break;
default:
- if (host->power)
- gpiod_set_value(host->power, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
clk_disable_unprepare(host->clk);
break;
}
@@ -936,38 +934,6 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};
-static int jz4740_mmc_request_gpios(struct jz4740_mmc_host *host,
- struct mmc_host *mmc,
- struct platform_device *pdev)
-{
- struct jz4740_mmc_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- if (!pdata)
- return 0;
-
- if (!pdata->card_detect_active_low)
- mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
- if (!pdata->read_only_active_low)
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
- /*
- * Get optional card detect and write protect GPIOs,
- * only back out on probe deferral.
- */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- host->power = devm_gpiod_get_optional(&pdev->dev, "power",
- GPIOD_OUT_HIGH);
- return PTR_ERR_OR_ZERO(host->power);
-}
-
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
@@ -982,9 +948,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
struct mmc_host *mmc;
struct jz4740_mmc_host *host;
const struct of_device_id *match;
- struct jz4740_mmc_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
if (!mmc) {
@@ -993,29 +956,25 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
}
host = mmc_priv(mmc);
- host->pdata = pdata;
match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
if (match) {
host->version = (enum jz4740_mmc_version)match->data;
- ret = mmc_of_parse(mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
- goto err_free_host;
- }
} else {
/* JZ4740 should be the only one using legacy probe */
host->version = JZ_MMC_JZ4740;
- mmc->caps |= MMC_CAP_SDIO_IRQ;
- if (!(pdata && pdata->data_1bit))
- mmc->caps |= MMC_CAP_4_BIT_DATA;
- ret = jz4740_mmc_request_gpios(host, mmc, pdev);
- if (ret)
- goto err_free_host;
}
+ ret = mmc_of_parse(mmc);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse device properties: %d\n", ret);
+ goto err_free_host;
+ }
+
+ mmc_regulator_get_supply(mmc);
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = host->irq;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 10ba46b728e8..1b1498805972 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1450,9 +1450,10 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
+ mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
- status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL);
+ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e352f5ad5801..387ff14587b8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1127,6 +1127,12 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
writel(c, base + MMCICOMMAND);
}
+static void mmci_stop_command(struct mmci_host *host)
+{
+ host->stop_abort.error = 0;
+ mmci_start_command(host, &host->stop_abort, 0);
+}
+
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
@@ -1196,10 +1202,16 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
/* The error clause is handled above, success! */
data->bytes_xfered = data->blksz * data->blocks;
- if (!data->stop || (host->mrq->sbc && !data->error))
+ if (!data->stop) {
+ if (host->variant->cmdreg_stop && data->error)
+ mmci_stop_command(host);
+ else
+ mmci_request_end(host, data->mrq);
+ } else if (host->mrq->sbc && !data->error) {
mmci_request_end(host, data->mrq);
- else
+ } else {
mmci_start_command(host, data->stop, 0);
+ }
}
}
@@ -1298,6 +1310,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
mmci_dma_error(host);
mmci_stop_data(host);
+ if (host->variant->cmdreg_stop && cmd->error) {
+ mmci_stop_command(host);
+ return;
+ }
}
mmci_request_end(host, host->mrq);
} else if (sbc) {
@@ -1956,6 +1972,11 @@ static int mmci_probe(struct amba_device *dev,
mmc->max_busy_timeout = 0;
}
+ /* Prepare a CMD12 - needed to clear the DPSM on some variants. */
+ host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
+ host->stop_abort.arg = 0;
+ host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
mmc->ops = &mmci_ops;
/* We support these PM capabilities. */
@@ -2011,7 +2032,7 @@ static int mmci_probe(struct amba_device *dev,
if (ret == -EPROBE_DEFER)
goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 24229097d05c..14df81054438 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -377,6 +377,7 @@ struct mmci_host {
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
+ struct mmc_command stop_abort;
struct mmc_data *data;
struct mmc_host *mmc;
struct clk *clk;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4d17032d15ee..d54612257b06 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,14 +31,12 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include <asm/dma.h>
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index add1e70195ea..4f06fb03c0a2 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -25,7 +25,6 @@
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -39,7 +38,6 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index b294b221f225..8a274b91804e 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -61,9 +61,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
struct device *dev = &spi->dev;
struct device_node *np = dev->of_node;
struct of_mmc_spi *oms;
- const __be32 *voltage_ranges;
- int num_ranges;
- int i;
if (dev->platform_data || !np)
return dev->platform_data;
@@ -72,25 +69,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
if (!oms)
return NULL;
- voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
- num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges || !num_ranges) {
- dev_err(dev, "OF: voltage-ranges unspecified\n");
+ if (mmc_of_parse_voltage(np, &oms->pdata.ocr_mask) <= 0)
goto err_ocr;
- }
-
- for (i = 0; i < num_ranges; i++) {
- const int j = i * 2;
- u32 mask;
-
- mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
- be32_to_cpu(voltage_ranges[j + 1]));
- if (!mask) {
- dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
- goto err_ocr;
- }
- oms->pdata.ocr_mask |= mask;
- }
oms->detect_irq = irq_of_parse_and_map(np, 0);
if (oms->detect_irq != 0) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index c60a7625b1fa..b2873a2432b6 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
reg &= ~(1 << 5);
OMAP_MMC_WRITE(host, SDIO, reg);
/* Set maximum timeout */
- OMAP_MMC_WRITE(host, CTO, 0xff);
+ OMAP_MMC_WRITE(host, CTO, 0xfd);
}
static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8779bbaa6b69..c907bf502a12 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -743,7 +743,7 @@ static int pxamci_probe(struct platform_device *pdev)
goto out;
}
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index da1e49c45bec..8394a7bb1fc1 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -15,6 +15,7 @@
struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
u32 tap; /* sampling clock position for SDR104 */
+ u32 tap_hs400; /* sampling clock position for HS400 */
};
struct renesas_sdhi_of_data {
@@ -49,6 +50,7 @@ struct renesas_sdhi {
struct pinctrl_state *pins_default, *pins_uhs;
void __iomem *scc_ctl;
u32 scc_tappos;
+ u32 scc_tappos_hs400;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 31a351a20dc0..71e13844df6c 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -337,6 +337,10 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host)
/* Set HS400 mode */
sd_ctrl_write16(host, CTL_SDIF_MODE, 0x0001 |
sd_ctrl_read16(host, CTL_SDIF_MODE));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
+ priv->scc_tappos_hs400);
+
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
@@ -396,6 +400,9 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
/* Reset HS400 mode */
sd_ctrl_write16(host, CTL_SDIF_MODE, ~0x0001 &
sd_ctrl_read16(host, CTL_SDIF_MODE));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
+
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
@@ -723,6 +730,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
+
+ /* SDR and HS200/400 registers require a HW reset */
+ if (of_data && of_data->scc_offset) {
+ priv->scc_ctl = host->ctl + of_data->scc_offset;
+ host->mmc->caps |= MMC_CAP_HW_RESET;
+ host->hw_reset = renesas_sdhi_hw_reset;
+ }
}
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -775,12 +789,11 @@ int renesas_sdhi_probe(struct platform_device *pdev,
const struct renesas_sdhi_scc *taps = of_data->taps;
bool hit = false;
- host->mmc->caps |= MMC_CAP_HW_RESET;
-
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
+ priv->scc_tappos_hs400 = taps->tap_hs400;
hit = true;
break;
}
@@ -789,12 +802,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
- priv->scc_ctl = host->ctl + of_data->scc_offset;
host->init_tuning = renesas_sdhi_init_tuning;
host->prepare_tuning = renesas_sdhi_prepare_tuning;
host->select_tuning = renesas_sdhi_select_tuning;
host->check_scc_error = renesas_sdhi_check_scc_error;
- host->hw_reset = renesas_sdhi_hw_reset;
host->prepare_hs400_tuning =
renesas_sdhi_prepare_hs400_tuning;
host->hs400_downgrade = renesas_sdhi_disable_scc;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 92c9b15252da..9dfafa2a90a3 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -81,6 +81,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
.clk_rate = 0,
.tap = 0x00000300,
+ .tap_hs400 = 0x00000704,
},
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 8471160316e0..02cd878e209f 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+ .max_blk_count = 0xffffffff,
};
/* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 10f5219b3b40..f31333e831a7 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1530,7 +1530,7 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
return ret;
}
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index bdbd4897c0f7..a6c2bd202b45 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -18,12 +18,10 @@
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 552bddc5096c..1cd10356fc14 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -55,7 +55,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
}
sdhci_get_of_property(pdev);
- mmc_of_parse(host->mmc);
+ res = mmc_of_parse(host->mmc);
+ if (res)
+ goto err;
/*
* Supply the existing CAPS, but clear the UHS modes. This
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d0d319398a54..8dbbc1f62b70 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -25,6 +25,7 @@
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+#include "cqhci.h"
#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
#define ESDHC_CTRL_D3CD 0x08
@@ -50,6 +51,7 @@
#define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24)
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
#define ESDHC_MIX_CTRL_HS400_EN (1 << 26)
+#define ESDHC_MIX_CTRL_HS400_ES_EN (1 << 27)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7
/* Tuning bits */
@@ -76,6 +78,9 @@
#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1)
#define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1
+#define ESDHC_VEND_SPEC2 0xc8
+#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8)
+
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
@@ -103,6 +108,9 @@
*/
#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28)
+/* the address offset of CQHCI */
+#define ESDHC_CQHCI_ADDR_OFFSET 0x100
+
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
* "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -138,51 +146,71 @@
#define ESDHC_FLAG_HS200 BIT(8)
/* The IP supports HS400 mode */
#define ESDHC_FLAG_HS400 BIT(9)
-
-/* A clock frequency higher than this rate requires strobe dll control */
-#define ESDHC_STROBE_DLL_CLK_FREQ 100000000
+/*
+ * The IP has erratum ERR010450:
+ * uSDHC: due to the I/O timing limit, the SD card clock can't exceed
+ * 150 MHz in SDR mode or 45 MHz in DDR mode.
+ */
+#define ESDHC_FLAG_ERR010450 BIT(10)
+/* The IP supports HS400ES mode */
+#define ESDHC_FLAG_HS400_ES BIT(11)
+/* The IP has Host Controller Interface for Command Queuing */
+#define ESDHC_FLAG_CQHCI BIT(12)
struct esdhc_soc_data {
u32 flags;
};
-static struct esdhc_soc_data esdhc_imx25_data = {
+static const struct esdhc_soc_data esdhc_imx25_data = {
.flags = ESDHC_FLAG_ERR004536,
};
-static struct esdhc_soc_data esdhc_imx35_data = {
+static const struct esdhc_soc_data esdhc_imx35_data = {
.flags = ESDHC_FLAG_ERR004536,
};
-static struct esdhc_soc_data esdhc_imx51_data = {
+static const struct esdhc_soc_data esdhc_imx51_data = {
.flags = 0,
};
-static struct esdhc_soc_data esdhc_imx53_data = {
+static const struct esdhc_soc_data esdhc_imx53_data = {
.flags = ESDHC_FLAG_MULTIBLK_NO_INT,
};
-static struct esdhc_soc_data usdhc_imx6q_data = {
+static const struct esdhc_soc_data usdhc_imx6q_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING,
};
-static struct esdhc_soc_data usdhc_imx6sl_data = {
+static const struct esdhc_soc_data usdhc_imx6sl_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
| ESDHC_FLAG_HS200,
};
-static struct esdhc_soc_data usdhc_imx6sx_data = {
+static const struct esdhc_soc_data usdhc_imx6sx_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200,
};
-static struct esdhc_soc_data usdhc_imx7d_data = {
+static const struct esdhc_soc_data usdhc_imx6ull_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_ERR010450,
+};
+
+static const struct esdhc_soc_data usdhc_imx7d_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400,
};
+static struct esdhc_soc_data usdhc_imx8qxp_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_CQHCI,
+};
+
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
@@ -227,7 +255,9 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
+ { .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
+ { .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -733,6 +763,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
+ unsigned int max_clock;
+
+ max_clock = imx_data->is_ddr ? 45000000 : 150000000;
+
+ clock = min(clock, max_clock);
+ }
+
while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
pre_div < 256)
pre_div *= 2;
@@ -801,6 +839,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
SDHCI_HOST_CONTROL);
}
+static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /*
+ * The i.MX uSDHC internally already uses a fixed, optimized timing for
+ * DDR50 and normally does not require tuning for DDR50 mode.
+ */
+ if (host->timing == MMC_TIMING_UHS_DDR50)
+ return 0;
+
+ return sdhci_execute_tuning(mmc, opcode);
+}
+
static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
u32 reg;
@@ -864,6 +916,19 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
return ret;
}
+static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 m;
+
+ m = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ if (ios->enhanced_strobe)
+ m |= ESDHC_MIX_CTRL_HS400_ES_EN;
+ else
+ m &= ~ESDHC_MIX_CTRL_HS400_ES_EN;
+ writel(m, host->ioaddr + ESDHC_MIX_CTRL);
+}
+
static int esdhc_change_pinstate(struct sdhci_host *host,
unsigned int uhs)
{
@@ -905,39 +970,35 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
* edge of data_strobe line. Due to the time delay between CLK line and
* data_strobe line, if the delay time is larger than one clock cycle,
* then CLK and data_strobe line will be misaligned, read error shows up.
- * So when the CLK is higher than 100MHz, each clock cycle is short enough,
- * host should configure the delay target.
*/
static void esdhc_set_strobe_dll(struct sdhci_host *host)
{
u32 v;
- if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
- /* disable clock before enabling strobe dll */
- writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
- ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
- host->ioaddr + ESDHC_VENDOR_SPEC);
+ /* disable clock before enabling strobe dll */
+ writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
+ ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
- /* force a reset on strobe dll */
- writel(ESDHC_STROBE_DLL_CTRL_RESET,
- host->ioaddr + ESDHC_STROBE_DLL_CTRL);
- /*
- * enable strobe dll ctrl and adjust the delay target
- * for the uSDHC loopback read clock
- */
- v = ESDHC_STROBE_DLL_CTRL_ENABLE |
- (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
- writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
- /* wait 1us to make sure strobe dll status register stable */
- udelay(1);
- v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
- if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
- dev_warn(mmc_dev(host->mmc),
- "warning! HS400 strobe DLL status REF not lock!\n");
- if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
- dev_warn(mmc_dev(host->mmc),
- "warning! HS400 strobe DLL status SLV not lock!\n");
- }
+ /* force a reset on strobe dll */
+ writel(ESDHC_STROBE_DLL_CTRL_RESET,
+ host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /*
+ * enable strobe dll ctrl and adjust the delay target
+ * for the uSDHC loopback read clock
+ */
+ v = ESDHC_STROBE_DLL_CTRL_ENABLE |
+ (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
+ writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
+ /* wait 1us to make sure strobe dll status register stable */
+ udelay(1);
+ v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
+ if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status REF not lock!\n");
+ if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK))
+ dev_warn(mmc_dev(host->mmc),
+ "warning! HS400 strobe DLL status SLV not lock!\n");
}
static void esdhc_reset_tuning(struct sdhci_host *host)
@@ -979,6 +1040,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
case MMC_TIMING_UHS_SDR25:
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS:
case MMC_TIMING_MMC_HS200:
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
break;
@@ -1042,6 +1104,19 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
SDHCI_TIMEOUT_CONTROL);
}
+static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -1058,6 +1133,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.set_bus_width = esdhc_pltfm_set_bus_width,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.reset = esdhc_reset,
+ .irq = esdhc_cqhci_irq,
};
static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -1095,16 +1171,34 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
| ESDHC_BURST_LEN_EN_INCR,
host->ioaddr + SDHCI_HOST_CONTROL);
+
/*
- * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
- * TO1.1, it's harmless for MX6SL
- */
- writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+ * TO1.1, it's harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
host->ioaddr + 0x6c);
/* disable DLL_CTRL delay line settings */
writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
+ /*
+	 * For commands with busy signalling, setting
+	 * ESDHC_VEND_SPEC2_EN_BUSY_IRQ makes the uSDHC generate a
+	 * transfer complete interrupt when busy is deasserted.
+	 * When CQHCI uses DCMD to send a command that needs an R1b
+	 * response, ESDHC_VEND_SPEC2_EN_BUSY_IRQ must be set,
+	 * otherwise the DCMD always times out waiting for the
+	 * hardware interrupt.
+ */
+ if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
+ tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2);
+ tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ;
+ writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2);
+
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ }
+
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
tmp |= ESDHC_STD_TUNING_EN |
@@ -1120,10 +1214,81 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
<< ESDHC_TUNING_STEP_SHIFT;
}
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+ } else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+ /*
+		 * ESDHC_STD_TUNING_EN may have been configured by the
+		 * bootloader or ROM code, so clear it here to make sure
+		 * manual tuning can work.
+ */
+ tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ tmp &= ~ESDHC_STD_TUNING_EN;
+ writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
}
}
}
+static void esdhc_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 reg;
+ u16 mode;
+ int count = 10;
+
+ /*
+ * CQE gets stuck if it sees Buffer Read Enable bit set, which can be
+ * the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if (count-- == 0) {
+ dev_warn(mmc_dev(host->mmc),
+ "CQE may get stuck because the Buffer Read Enable bit is set\n");
+ break;
+ }
+ mdelay(1);
+ }
+
+ /*
+	 * Runtime resume will reset the entire host controller, which
+	 * also clears DMAEN/BCEN in the ESDHC_MIX_CTRL register.
+	 * Set DMAEN and BCEN here when enabling CMDQ.
+ */
+ mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ mode |= SDHCI_TRNS_DMA;
+ if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
+ mode |= SDHCI_TRNS_BLK_CNT_EN;
+ sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+
+ /*
+	 * Although runtime resume resets the entire host controller,
+	 * it does not touch the CQHCI side, so clear the HALT bit
+	 * here to avoid CQHCI getting stuck on the first request
+	 * after system resume.
+ */
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
+		dev_err(mmc_dev(host->mmc),
+			"failed to exit halt state when enabling CQE\n");
+
+ sdhci_cqe_enable(mmc);
+}
+
+static void esdhc_sdhci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static const struct cqhci_host_ops esdhc_cqhci_ops = {
+ .enable = esdhc_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = esdhc_sdhci_dumpregs,
+};
+
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -1200,7 +1365,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL);
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
@@ -1255,6 +1420,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
+ struct cqhci_host *cq_host;
int err;
struct pltfm_imx_data *imx_data;
@@ -1321,6 +1487,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+
+ /*
+	 * Hook up the uSDHC-specific mmc_host_ops execute_tuning
+	 * function to replace the standard one in sdhci_ops.
+ */
+ host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
@@ -1333,6 +1505,28 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+ host->mmc->caps2 |= MMC_CAP2_HS400_ES;
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ esdhc_hs400_enhanced_strobe;
+ }
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ err = -ENOMEM;
+ goto disable_ahb_clk;
+ }
+
+ cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET;
+ cq_host->ops = &esdhc_cqhci_ops;
+
+ err = cqhci_init(cq_host, host->mmc, false);
+ if (err)
+ goto disable_ahb_clk;
+ }
+
if (of_id)
err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
else
@@ -1340,6 +1534,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_ahb_clk;
+ host->tuning_delay = 1;
+
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);
@@ -1391,6 +1587,13 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
static int sdhci_esdhc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
@@ -1401,11 +1604,19 @@ static int sdhci_esdhc_suspend(struct device *dev)
static int sdhci_esdhc_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ int ret;
/* re-initialize hw state in case it's lost in low power mode */
sdhci_esdhc_imx_hwinit(host);
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret)
+ return ret;
+
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
+ return ret;
}
#endif
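
For orientation: the system-sleep callbacks above and the runtime-PM callbacks in the following hunks are assumed to be wired through the driver's dev_pm_ops table, roughly as in this sketch (the table name and exact layout are assumptions, since the table itself is not part of the hunks shown):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Sketch only: name and contents assumed, not taken from this patch. */
static const struct dev_pm_ops sdhci_esdhc_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
	SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
			   sdhci_esdhc_runtime_resume, NULL)
};

The platform_driver would then reference it via .driver.pm = &sdhci_esdhc_pmops, so the CQHCI suspend/resume ordering added here applies to both system and runtime PM transitions.
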
@@ -1417,6 +1628,12 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_runtime_suspend_host(host);
if (ret)
return ret;
@@ -1460,7 +1677,10 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
if (err)
goto disable_ipg_clk;
- return 0;
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ err = cqhci_resume(host->mmc);
+
+ return err;
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index c11c18a9aacb..b1a66ca3821a 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1097,7 +1097,6 @@ static int sdhci_omap_probe(struct platform_device *pdev)
goto err_put_sync;
}
- host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_omap_start_signal_voltage_switch;
host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 2a6eba74b94e..99b0fec2836b 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1257,16 +1257,6 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
}
#endif
-static const struct sdhci_pci_fixes sdhci_o2 = {
- .probe = sdhci_pci_o2_probe,
- .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
- .probe_slot = sdhci_pci_o2_probe_slot,
-#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_pci_o2_resume,
-#endif
-};
-
static const struct sdhci_pci_fixes sdhci_jmicron = {
.probe = jmicron_probe,
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index cc3ffeffd7a2..05a012a694b2 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -60,6 +60,13 @@
#define O2_SD_VENDOR_SETTING2 0x1C8
#define O2_SD_HW_TUNING_DISABLE BIT(4)
+#define O2_PLL_WDT_CONTROL1 0x1CC
+#define O2_PLL_FORCE_ACTIVE BIT(18)
+#define O2_PLL_LOCK_STATUS BIT(14)
+#define O2_PLL_SOFT_RESET BIT(12)
+
+#define O2_SD_DETECT_SETTING 0x324
+
static void sdhci_o2_set_tuning_mode(struct sdhci_host *host)
{
u16 reg;
@@ -283,6 +290,113 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
host->irq = pci_irq_vector(chip->pdev, 0);
}
+static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 scratch32;
+
+ /* Wait max 50 ms */
+ timeout = ktime_add_ms(ktime_get(), 50);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT
+ == (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT)
+ break;
+
+ if (timedout) {
+ pr_err("%s: Card Detect debounce never finished.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+}
+
+static void sdhci_o2_enable_internal_clock(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u16 scratch;
+ u32 scratch32;
+
+ /* PLL software reset */
+ scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1);
+ scratch32 |= O2_PLL_SOFT_RESET;
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+ udelay(1);
+ scratch32 &= ~(O2_PLL_SOFT_RESET);
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+
+ /* PLL force active */
+ scratch32 |= O2_PLL_FORCE_ACTIVE;
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ scratch = sdhci_readw(host, O2_PLL_WDT_CONTROL1);
+ if (scratch & O2_PLL_LOCK_STATUS)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ goto out;
+ }
+ udelay(10);
+ }
+
+ /* Wait for card detect finish */
+ udelay(1);
+ sdhci_o2_wait_card_detect_stable(host);
+
+out:
+ /* Cancel PLL force active */
+ scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1);
+ scratch32 &= ~O2_PLL_FORCE_ACTIVE;
+ sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
+}
+
+static int sdhci_o2_get_cd(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_o2_enable_internal_clock(host);
+
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
+static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk)
+{
+ /* Enable internal clock */
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (sdhci_o2_get_cd(host->mmc)) {
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_o2_enable_clk(host, clk);
+}
+
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -314,9 +428,14 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
mmc_hostname(host->mmc));
host->flags &= ~SDHCI_SIGNALING_330;
host->flags |= SDHCI_SIGNALING_180;
+ host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
host->mmc->caps2 |= MMC_CAP2_NO_SD;
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+ pci_write_config_dword(chip->pdev,
+ O2_SD_DETECT_SETTING, 3);
}
+
+ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
}
host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
@@ -490,9 +609,6 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_SEABIRD0:
- if (chip->pdev->revision == 0x01)
- chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
- /* fall through */
case PCI_DEVICE_ID_O2_SEABIRD1:
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev,
@@ -550,3 +666,21 @@ int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
return sdhci_pci_resume_host(chip);
}
#endif
+
+static const struct sdhci_ops sdhci_pci_o2_ops = {
+ .set_clock = sdhci_pci_o2_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_o2 = {
+ .probe = sdhci_pci_o2_probe,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = sdhci_pci_o2_probe_slot,
+#ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_o2_resume,
+#endif
+ .ops = &sdhci_pci_o2_ops,
+};
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 2ef0bdca9197..4ddb69a15cd7 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -179,13 +179,9 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
int sdhci_pci_enable_dma(struct sdhci_host *host);
-int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-#ifdef CONFIG_PM_SLEEP
-int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-#endif
extern const struct sdhci_pci_fixes sdhci_arasan;
extern const struct sdhci_pci_fixes sdhci_snps;
+extern const struct sdhci_pci_fixes sdhci_o2;
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 2c3827f54927..cdc8e16b4567 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -23,7 +23,6 @@
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index e6ace31e2a41..32e62904c0d3 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -33,6 +33,7 @@
#include <linux/ktime.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
@@ -75,6 +76,7 @@
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
+#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000
#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
@@ -89,6 +91,9 @@
#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
+/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
+#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
+
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
u32 nvquirks;
@@ -121,6 +126,8 @@ struct sdhci_tegra {
struct pinctrl *pinctrl_sdmmc;
struct pinctrl_state *pinctrl_state_3v3;
struct pinctrl_state *pinctrl_state_1v8;
+ struct pinctrl_state *pinctrl_state_3v3_drv;
+ struct pinctrl_state *pinctrl_state_1v8_drv;
struct sdhci_tegra_autocal_offsets autocal_offsets;
ktime_t last_calib;
@@ -128,6 +135,7 @@ struct sdhci_tegra {
u32 default_tap;
u32 default_trim;
u32 dqs_trim;
+ bool enable_hwcq;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
@@ -237,11 +245,6 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
}
}
-static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
-{
- return mmc_gpio_get_ro(host->mmc);
-}
-
static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -411,6 +414,76 @@ static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}
+static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
+ bool state_drvupdn)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_tegra_autocal_offsets *offsets =
+ &tegra_host->autocal_offsets;
+ struct pinctrl_state *pinctrl_drvupdn = NULL;
+ int ret = 0;
+ u8 drvup = 0, drvdn = 0;
+ u32 reg;
+
+ if (!state_drvupdn) {
+ /* PADS Drive Strength */
+ if (voltage == MMC_SIGNAL_VOLTAGE_180) {
+ if (tegra_host->pinctrl_state_1v8_drv) {
+ pinctrl_drvupdn =
+ tegra_host->pinctrl_state_1v8_drv;
+ } else {
+ drvup = offsets->pull_up_1v8_timeout;
+ drvdn = offsets->pull_down_1v8_timeout;
+ }
+ } else {
+ if (tegra_host->pinctrl_state_3v3_drv) {
+ pinctrl_drvupdn =
+ tegra_host->pinctrl_state_3v3_drv;
+ } else {
+ drvup = offsets->pull_up_3v3_timeout;
+ drvdn = offsets->pull_down_3v3_timeout;
+ }
+ }
+
+ if (pinctrl_drvupdn != NULL) {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ pinctrl_drvupdn);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "failed pads drvupdn, ret: %d\n", ret);
+ } else if ((drvup) || (drvdn)) {
+ reg = sdhci_readl(host,
+ SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+ reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
+ reg |= (drvup << 20) | (drvdn << 12);
+ sdhci_writel(host, reg,
+ SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+ }
+
+ } else {
+ /* Dual Voltage PADS Voltage selection */
+ if (!tegra_host->pad_control_available)
+ return 0;
+
+ if (voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ tegra_host->pinctrl_state_1v8);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "setting 1.8V failed, ret: %d\n", ret);
+ } else {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ tegra_host->pinctrl_state_3v3);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "setting 3.3V failed, ret: %d\n", ret);
+ }
+ }
+
+ return ret;
+}
+
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -437,6 +510,7 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
}
+ /* Set initial offset before auto-calibration */
tegra_sdhci_set_pad_autocal_offset(host, pdpu);
card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
@@ -460,19 +534,15 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
if (ret) {
dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
- pdpu = offsets.pull_down_1v8_timeout << 8 |
- offsets.pull_up_1v8_timeout;
- else
- pdpu = offsets.pull_down_3v3_timeout << 8 |
- offsets.pull_up_3v3_timeout;
-
- /* Disable automatic calibration and use fixed offsets */
+ /* Disable automatic cal and use fixed Drive Strengths */
reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
reg &= ~SDHCI_AUTO_CAL_ENABLE;
sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
- tegra_sdhci_set_pad_autocal_offset(host, pdpu);
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "Setting drive strengths failed: %d\n", ret);
}
}
@@ -511,26 +581,46 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
&autocal->pull_up_3v3_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
+ (tegra_host->pinctrl_state_3v3_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_up_3v3_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
&autocal->pull_down_3v3_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
+ (tegra_host->pinctrl_state_3v3_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_down_3v3_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
&autocal->pull_up_1v8_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
+ (tegra_host->pinctrl_state_1v8_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_up_1v8_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
&autocal->pull_down_1v8_timeout);
- if (err)
+ if (err) {
+ if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
+ (tegra_host->pinctrl_state_1v8_drv == NULL))
+ pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
+ mmc_hostname(host->mmc));
autocal->pull_down_1v8_timeout = 0;
+ }
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-sdr104",
@@ -595,6 +685,20 @@ static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
tegra_host->dqs_trim = 0x11;
}
+static void tegra_sdhci_parse_dt(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
+ tegra_host->enable_hwcq = true;
+ else
+ tegra_host->enable_hwcq = false;
+
+ tegra_sdhci_parse_pad_autocal_dt(host);
+ tegra_sdhci_parse_tap_and_trim(host);
+}
+
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -743,32 +847,6 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return mmc_send_tuning(host->mmc, opcode, NULL);
}
-static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
-
- if (!tegra_host->pad_control_available)
- return 0;
-
- if (voltage == MMC_SIGNAL_VOLTAGE_180) {
- ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
- tegra_host->pinctrl_state_1v8);
- if (ret < 0)
- dev_err(mmc_dev(host->mmc),
- "setting 1.8V failed, ret: %d\n", ret);
- } else {
- ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
- tegra_host->pinctrl_state_3v3);
- if (ret < 0)
- dev_err(mmc_dev(host->mmc),
- "setting 3.3V failed, ret: %d\n", ret);
- }
-
- return ret;
-}
-
static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -778,7 +856,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
int ret = 0;
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
- ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage);
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
if (ret < 0)
return ret;
ret = sdhci_start_signal_voltage_switch(mmc, ios);
@@ -786,7 +864,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
ret = sdhci_start_signal_voltage_switch(mmc, ios);
if (ret < 0)
return ret;
- ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage);
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
}
if (tegra_host->pad_calib_required)
@@ -805,6 +883,20 @@ static int tegra_sdhci_init_pinctrl_info(struct device *dev,
return -1;
}
+ tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
+ tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
+ if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
+ if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
+ tegra_host->pinctrl_state_1v8_drv = NULL;
+ }
+
+ tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
+ tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
+ if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
+ if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
+ tegra_host->pinctrl_state_3v3_drv = NULL;
+ }
+
tegra_host->pinctrl_state_3v3 =
pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
@@ -836,8 +928,50 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
tegra_host->pad_calib_required = true;
}
+static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 cqcfg = 0;
+
+ /*
+ * Tegra SDMMC Controller design prevents write access to BLOCK_COUNT
+ * registers when CQE is enabled.
+ */
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ if (cqcfg & CQHCI_ENABLE)
+ cqhci_writel(cq_host, (cqcfg & ~CQHCI_ENABLE), CQHCI_CFG);
+
+ sdhci_cqe_enable(mmc);
+
+ if (cqcfg & CQHCI_ENABLE)
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+}
+
+static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
+ .enable = sdhci_tegra_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_tegra_dumpregs,
+};
+
static const struct sdhci_ops tegra_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
@@ -893,7 +1027,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
};
static const struct sdhci_ops tegra114_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra_sdhci_writew,
.write_l = tegra_sdhci_writel,
@@ -947,7 +1080,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
};
static const struct sdhci_ops tegra210_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra210_sdhci_writew,
.write_l = tegra_sdhci_writel,
@@ -980,7 +1112,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
};
static const struct sdhci_ops tegra186_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
@@ -989,6 +1120,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
.voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
+ .irq = sdhci_tegra_cqhci_irq,
};
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
@@ -1030,6 +1162,54 @@ static const struct of_device_id sdhci_tegra_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
+static int sdhci_tegra_add_host(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!tegra_host->enable_hwcq)
+ return sdhci_add_host(host);
+
+ sdhci_enable_v4_mode(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_tegra_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_tegra_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1077,9 +1257,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
- tegra_sdhci_parse_pad_autocal_dt(host);
-
- tegra_sdhci_parse_tap_and_trim(host);
+ tegra_sdhci_parse_dt(host);
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
GPIOD_OUT_HIGH);
@@ -1117,7 +1295,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
usleep_range(2000, 4000);
- rc = sdhci_add_host(host);
+ rc = sdhci_tegra_add_host(host);
if (rc)
goto err_add_host;
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 5b5eb53a63d2..8d07ee1b8f08 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -530,7 +530,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host,
ret = true;
break;
}
- /* else: fall through */
+ /* fall through */
default:
reg &= ~XENON_TIMING_ADJUST_SLOW_MODE;
ret = false;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index eba9bcc92ad3..a8141ff9be03 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -883,7 +883,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
bool *too_big)
{
u8 count;
- struct mmc_data *data = cmd->data;
+ struct mmc_data *data;
unsigned target_timeout, current_timeout;
*too_big = true;
@@ -897,6 +897,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return 0xE;
+	/* Unspecified command, assume max */
+ if (cmd == NULL)
+ return 0xE;
+
+ data = cmd->data;
/* Unspecified timeout, assume max */
if (!data && !cmd->busy_timeout)
return 0xE;
@@ -2048,6 +2053,8 @@ static int sdhci_check_ro(struct sdhci_host *host)
is_readonly = 0;
else if (host->ops->get_ro)
is_readonly = host->ops->get_ro(host);
+ else if (mmc_can_gpio_ro(host->mmc))
+ is_readonly = mmc_gpio_get_ro(host->mmc);
else
is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
& SDHCI_WRITE_PROTECT);
@@ -2376,6 +2383,10 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return -ETIMEDOUT;
}
+ /* Spec does not require a delay between tuning cycles */
+ if (host->tuning_delay > 0)
+ mdelay(host->tuning_delay);
+
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
if (ctrl & SDHCI_CTRL_TUNED_CLK)
@@ -2383,9 +2394,6 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
break;
}
- /* Spec does not require a delay between tuning cycles */
- if (host->tuning_delay > 0)
- mdelay(host->tuning_delay);
}
pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
@@ -3353,7 +3361,14 @@ void sdhci_cqe_enable(struct mmc_host *mmc)
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
ctrl &= ~SDHCI_CTRL_DMA_MASK;
- if (host->flags & SDHCI_USE_64_BIT_DMA)
+ /*
+	 * Hosts from v4.10 onwards support the ADMA3 DMA type.
+	 * ADMA3 uses integrated descriptors, which suits command
+	 * queuing better since it fetches both command and transfer
+	 * descriptors in one go.
+ */
+ if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
+ ctrl |= SDHCI_CTRL_ADMA3;
+ else if (host->flags & SDHCI_USE_64_BIT_DMA)
ctrl |= SDHCI_CTRL_ADMA64;
else
ctrl |= SDHCI_CTRL_ADMA32;
@@ -3363,7 +3378,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc)
SDHCI_BLOCK_SIZE);
/* Set maximum timeout */
- sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
+ sdhci_set_timeout(host, NULL);
host->ier = host->cqe_ier;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 6cc9a3c2ac66..01002cba1359 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -73,6 +73,10 @@
#define SDHCI_SPACE_AVAILABLE 0x00000400
#define SDHCI_DATA_AVAILABLE 0x00000800
#define SDHCI_CARD_PRESENT 0x00010000
+#define SDHCI_CARD_PRES_SHIFT 16
+#define SDHCI_CD_STABLE 0x00020000
+#define SDHCI_CD_LVL 0x00040000
+#define SDHCI_CD_LVL_SHIFT 18
#define SDHCI_WRITE_PROTECT 0x00080000
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
@@ -88,6 +92,7 @@
#define SDHCI_CTRL_ADMA1 0x08
#define SDHCI_CTRL_ADMA32 0x10
#define SDHCI_CTRL_ADMA64 0x18
+#define SDHCI_CTRL_ADMA3 0x18
#define SDHCI_CTRL_8BITBUS 0x20
#define SDHCI_CTRL_CDTEST_INS 0x40
#define SDHCI_CTRL_CDTEST_EN 0x80
@@ -230,6 +235,7 @@
#define SDHCI_RETUNING_MODE_SHIFT 14
#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
#define SDHCI_CLOCK_MUL_SHIFT 16
+#define SDHCI_CAN_DO_ADMA3 0x08000000
#define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */
#define SDHCI_CAPABILITIES_1 0x44
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 8c05879850a0..eea183e90f1b 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -158,7 +158,7 @@ static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-struct sdhci_ops sdhci_am654_ops = {
+static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 70fadc976795..2901a5773d83 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -19,7 +19,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -32,7 +31,6 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c03529e3f01a..2adb0d24360f 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 085a0fab769c..595949f1f001 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
+#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>
@@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
return false;
}
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
@@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return;
+ return false;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
+
+ return ireg;
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- __tmio_mmc_sdio_irq(host);
+ if (__tmio_mmc_sdio_irq(host))
+ return IRQ_HANDLED;
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
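
Returning IRQ_NONE when neither the SD-card nor the SDIO status bits belonged to this controller matters mostly on shared interrupt lines: the IRQ core uses the return value to detect spurious interrupts and to let other handlers on the same line run. A minimal sketch of that pattern with a hypothetical device (register layout and names are assumptions, not from this patch):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct demo_host {
	void __iomem *regs;		/* hypothetical register base */
};

#define DEMO_STATUS	0x00		/* hypothetical status register */

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_host *host = dev_id;
	u32 status = readl(host->regs + DEMO_STATUS);

	if (!status)
		return IRQ_NONE;	/* not ours; lets the core cope with
					 * a shared or spurious interrupt */

	writel(status, host->regs + DEMO_STATUS);	/* acknowledge */
	return IRQ_HANDLED;
}

/* registered with: request_irq(irq, demo_irq, IRQF_SHARED, "demo", host); */
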
@@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
- sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+ if (host->mmc->max_blk_count >= SZ_64K)
+ sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+ else
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);
@@ -1066,7 +1073,7 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
- mmc->ocr_avail = pdata->ocr_mask;
+ mmc->ocr_avail = pdata->ocr_mask;
/*
* try again.
@@ -1287,6 +1294,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 3ba42f508014..4fd6da29489e 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of.h>
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 6e8e7b1bb34b..79a53cb8507b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -756,7 +756,8 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
}
numvirtchips = cfi->numchips * numparts;
- newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
+ GFP_KERNEL);
if (!newcfi)
return -ENOMEM;
shared = kmalloc_array(cfi->numchips,
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 837b04ab96a9..839ed40625d6 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -135,7 +135,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
* our caller, and copy the appropriate data into them.
*/
- retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
+ retcfi = kmalloc(struct_size(retcfi, chips, cfi.numchips), GFP_KERNEL);
if (!retcfi) {
kfree(cfi.cfiq);
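
The two kmalloc() conversions above (and the lpddr one further below) replace open-coded sizeof(struct ...) + n * sizeof(elem) arithmetic with the struct_size() helper from <linux/overflow.h>, which saturates on overflow so a large element count can never produce an undersized allocation. A minimal sketch of the pattern with a hypothetical structure (struct id_table and its fields are illustrative only):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical structure, not part of the patch; it only illustrates the
 * trailing flexible array member that struct_size() is meant for. */
struct id_table {
	unsigned int nids;
	u32 ids[];
};

static struct id_table *id_table_alloc(unsigned int nids)
{
	struct id_table *t;

	/*
	 * struct_size(t, ids, nids) == sizeof(*t) + nids * sizeof(t->ids[0]),
	 * but it saturates to SIZE_MAX if the multiplication overflows, so
	 * kmalloc() fails instead of returning an undersized buffer.
	 */
	t = kmalloc(struct_size(t, ids, nids), GFP_KERNEL);
	if (!t)
		return NULL;

	t->nids = nids;
	return t;
}
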
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 4c94fc096696..7754803e3463 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1767,8 +1767,8 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
- docg3->device_id);
+ mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d",
+ docg3->device_id);
if (!mtd->name)
return -ENOMEM;
docg3->max_block = 2047;
@@ -1872,7 +1872,7 @@ nomem3:
nomem2:
kfree(docg3);
nomem1:
- return ERR_PTR(ret);
+ return ret ? ERR_PTR(ret) : NULL;
}
/**
@@ -1886,7 +1886,6 @@ static void doc_release_device(struct mtd_info *mtd)
mtd_device_unregister(mtd);
kfree(docg3->bbt);
kfree(docg3);
- kfree(mtd->name);
kfree(mtd);
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c4a1d04b8c80..651bab6d4e31 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -195,7 +195,14 @@ static int m25p_probe(struct spi_mem *spimem)
spi_mem_set_drvdata(spimem, flash);
flash->spimem = spimem;
- if (spi->mode & SPI_RX_QUAD) {
+ if (spi->mode & SPI_RX_OCTAL) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+
+ if (spi->mode & SPI_TX_OCTAL)
+ hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 |
+ SNOR_HWCAPS_PP_1_1_8 |
+ SNOR_HWCAPS_PP_1_8_8);
+ } else if (spi->mode & SPI_RX_QUAD) {
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
if (spi->mode & SPI_TX_QUAD)
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 46238796145f..1c97fabc4bf9 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -24,14 +24,12 @@ static unsigned long writebuf_size = 64;
#define MTDRAM_TOTAL_SIZE (total_size * 1024)
#define MTDRAM_ERASE_SIZE (erase_size * 1024)
-#ifdef MODULE
module_param(total_size, ulong, 0);
MODULE_PARM_DESC(total_size, "Total device size in KiB");
module_param(erase_size, ulong, 0);
MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
module_param(writebuf_size, ulong, 0);
MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
-#endif
// We could store these in the mtd structure, but we only support 1 device..
static struct mtd_info *mtd_info;
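
With the #ifdef MODULE guard dropped above, the three module_param() declarations take effect even when mtdram is built into the kernel, so the sizes can then be set on the kernel command line using the module-name prefix, e.g. mtdram.total_size=16384 mtdram.erase_size=128. When built as a module, they are still passed at load time as before (e.g. modprobe mtdram total_size=16384).
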
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 22f753e555ac..83f88b8b5d9f 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
* Going to have to check what details I need to set and how to
* get them
*/
- mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 69f2112340b1..175bdc3b72f4 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -181,8 +181,8 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
- retlpddr = kzalloc(sizeof(struct lpddr_private) +
- numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ retlpddr = kzalloc(struct_size(retlpddr, chips, numvirtchips),
+ GFP_KERNEL);
if (!retlpddr)
return NULL;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 999b705769a8..76b4264936ff 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -155,7 +155,6 @@ static ssize_t mtd_flags_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
-
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
@@ -166,7 +165,6 @@ static ssize_t mtd_size_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
-
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
@@ -176,7 +174,6 @@ static ssize_t mtd_erasesize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
-
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
@@ -186,7 +183,6 @@ static ssize_t mtd_writesize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
-
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
@@ -197,7 +193,6 @@ static ssize_t mtd_subpagesize_show(struct device *dev,
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
-
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
@@ -207,7 +202,6 @@ static ssize_t mtd_oobsize_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
-
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
@@ -226,7 +220,6 @@ static ssize_t mtd_numeraseregions_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
-
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
NULL);
@@ -237,7 +230,6 @@ static ssize_t mtd_name_show(struct device *dev,
struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
-
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
@@ -507,6 +499,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
{
struct nvmem_config config = {};
+ config.id = -1;
config.dev = &mtd->dev;
config.name = mtd->name;
config.owner = THIS_MODULE;
@@ -559,6 +552,14 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
+ /*
+ * MTD drivers should implement ->_{write,read}() or
+ * ->_{write,read}_oob(), but not both.
+ */
+ if (WARN_ON((mtd->_write && mtd->_write_oob) ||
+ (mtd->_read && mtd->_read_oob)))
+ return -EINVAL;
+
if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
@@ -1089,67 +1090,32 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
- int ret_code;
- *retlen = 0;
- if (from < 0 || from >= mtd->size || len > mtd->size - from)
- return -EINVAL;
- if (!len)
- return 0;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+ int ret;
- ledtrig_mtd_activity();
- /*
- * In the absence of an error, drivers return a non-negative integer
- * representing the maximum number of bitflips that were corrected on
- * any one ecc region (if applicable; zero otherwise).
- */
- if (mtd->_read) {
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
- } else if (mtd->_read_oob) {
- struct mtd_oob_ops ops = {
- .len = len,
- .datbuf = buf,
- };
-
- ret_code = mtd->_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- } else {
- return -ENOTSUPP;
- }
+ ret = mtd_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
- if (unlikely(ret_code < 0))
- return ret_code;
- if (mtd->ecc_strength == 0)
- return 0; /* device lacks ecc */
- return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
- *retlen = 0;
- if (to < 0 || to >= mtd->size || len > mtd->size - to)
- return -EINVAL;
- if ((!mtd->_write && !mtd->_write_oob) ||
- !(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- if (!len)
- return 0;
- ledtrig_mtd_activity();
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
- if (!mtd->_write) {
- struct mtd_oob_ops ops = {
- .len = len,
- .datbuf = (u8 *)buf,
- };
- int ret;
+ ret = mtd_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
- ret = mtd->_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
- }
-
- return mtd->_write(mtd, to, len, retlen, buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
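
This consolidation relies on mtd_read_oob()/mtd_write_oob() already performing the bounds checking, LED trigger, and bitflip-threshold accounting that the removed open-coded paths did, so callers see unchanged semantics, including -EUCLEAN when corrected bitflips reach mtd->bitflip_threshold. A hedged sketch of a typical caller (function name and error handling are illustrative only):

#include <linux/mtd/mtd.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int demo_read_block(struct mtd_info *mtd, loff_t from, size_t len)
{
	size_t retlen = 0;
	u8 *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = mtd_read(mtd, from, len, &retlen, buf);
	if (err == -EUCLEAN) {
		/* Data is valid but corrected bitflips reached the
		 * threshold; a UBI-like user would schedule scrubbing. */
		err = 0;
	} else if (err) {
		pr_err("mtd read at %llx failed: %d\n",
		       (unsigned long long)from, err);
	}

	kfree(buf);
	return err;
}
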
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 1a55d3e3d4c5..e604625e2dfa 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -541,4 +541,21 @@ config MTD_NAND_TEGRA
is supported. Extra OOB bytes when using HW ECC are currently
not supported.
+config MTD_NAND_STM32_FMC2
+ tristate "Support for NAND controller on STM32MP SoCs"
+ depends on MACH_STM32MP157 || COMPILE_TEST
+ help
+ Enables support for NAND Flash chips on SoCs containing the FMC2
+ NAND controller. This controller is found on STM32MP SoCs.
+	  The controller supports a maximum page size of 8k and up to
+	  8 bits of error correction per 512-byte sector.
+
+config MTD_NAND_MESON
+ tristate "Support for NAND controller on Amlogic's Meson SoCs"
+ depends on ARCH_MESON || COMPILE_TEST
+ select MFD_SYSCON
+ help
+	  Enables support for the NAND flash controller found on
+	  Amlogic's Meson SoCs.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 57159b349054..5a5a72f0793e 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -56,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
+obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
+obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 555a74e15269..9d3997840889 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -876,23 +876,32 @@ static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
{
struct platform_device *pdev;
struct atmel_pmecc *pmecc, **ptr;
+ int ret;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ pmecc = platform_get_drvdata(pdev);
+ if (!pmecc) {
+ ret = -EPROBE_DEFER;
+ goto err_put_device;
+ }
ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- get_device(&pdev->dev);
- pmecc = platform_get_drvdata(pdev);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err_put_device;
+ }
*ptr = pmecc;
devres_add(userdev, ptr);
return pmecc;
+
+err_put_device:
+ put_device(&pdev->dev);
+ return ERR_PTR(ret);
}
static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 6e8edc9375dd..24aeafc67cd4 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -37,9 +37,6 @@
#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
-/* MAP10 commands */
-#define DENALI_ERASE 0x01
-
#define DENALI_BANK(denali) ((denali)->active_bank << 24)
#define DENALI_INVALID_BANK -1
@@ -476,7 +473,7 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw)
+ size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
@@ -504,7 +501,7 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
}
static int denali_pio_write(struct denali_nand_info *denali,
- const void *buf, size_t size, int page, int raw)
+ const void *buf, size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
@@ -525,16 +522,16 @@ static int denali_pio_write(struct denali_nand_info *denali,
}
static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw, int write)
+ size_t size, int page, int write)
{
if (write)
- return denali_pio_write(denali, buf, size, page, raw);
+ return denali_pio_write(denali, buf, size, page);
else
- return denali_pio_read(denali, buf, size, page, raw);
+ return denali_pio_read(denali, buf, size, page);
}
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
- size_t size, int page, int raw, int write)
+ size_t size, int page, int write)
{
dma_addr_t dma_addr;
uint32_t irq_mask, irq_status, ecc_err_mask;
@@ -544,7 +541,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
dma_addr = dma_map_single(denali->dev, buf, size, dir);
if (dma_mapping_error(denali->dev, dma_addr)) {
dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
- return denali_pio_xfer(denali, buf, size, page, raw, write);
+ return denali_pio_xfer(denali, buf, size, page, write);
}
if (write) {
@@ -598,9 +595,9 @@ static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
- return denali_dma_xfer(denali, buf, size, page, raw, write);
+ return denali_dma_xfer(denali, buf, size, page, write);
else
- return denali_pio_xfer(denali, buf, size, page, raw, write);
+ return denali_pio_xfer(denali, buf, size, page, write);
}
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
@@ -754,9 +751,6 @@ static int denali_read_oob(struct nand_chip *chip, int page)
static int denali_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct denali_nand_info *denali = mtd_to_denali(mtd);
-
- denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
@@ -903,23 +897,6 @@ static int denali_waitfunc(struct nand_chip *chip)
return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}
-static int denali_erase(struct nand_chip *chip, int page)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- uint32_t irq_status;
-
- denali_reset_irq(denali);
-
- denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
-
- /* wait for erase to complete or failure to occur */
- irq_status = denali_wait_for_irq(denali,
- INTR__ERASE_COMP | INTR__ERASE_FAIL);
-
- return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
-}
-
static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
@@ -1244,7 +1221,6 @@ static int denali_attach_chip(struct nand_chip *chip)
chip->ecc.write_page_raw = denali_write_page_raw;
chip->ecc.read_oob = denali_read_oob;
chip->ecc.write_oob = denali_write_oob;
- chip->legacy.erase = denali_erase;
ret = denali_multidev_fixup(denali);
if (ret)
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 25c00601b8b3..c8c2620fc736 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -304,7 +304,6 @@ struct denali_nand_info {
u32 irq_status; /* interrupts that have happened */
int irq;
void *buf; /* for syndrome layout conversion */
- dma_addr_t dma_addr;
int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
int oob_skip_bytes; /* number of bytes reserved for BBM */
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 7c6a8a426606..0b5ae2418815 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -109,25 +109,17 @@ static int denali_dt_probe(struct platform_device *pdev)
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
- /*
- * A single anonymous clock is supported for the backward compatibility.
- * New platforms should support all the named clocks.
- */
dt->clk = devm_clk_get(dev, "nand");
if (IS_ERR(dt->clk))
- dt->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(dt->clk)) {
- dev_err(dev, "no clk available\n");
return PTR_ERR(dt->clk);
- }
dt->clk_x = devm_clk_get(dev, "nand_x");
if (IS_ERR(dt->clk_x))
- dt->clk_x = NULL;
+ return PTR_ERR(dt->clk_x);
dt->clk_ecc = devm_clk_get(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
- dt->clk_ecc = NULL;
+ return PTR_ERR(dt->clk_ecc);
ret = clk_prepare_enable(dt->clk);
if (ret)
@@ -141,19 +133,8 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
goto out_disable_clk_x;
- if (dt->clk_x) {
- denali->clk_rate = clk_get_rate(dt->clk);
- denali->clk_x_rate = clk_get_rate(dt->clk_x);
- } else {
- /*
- * Hardcode the clock rates for the backward compatibility.
- * This works for both SOCFPGA and UniPhier.
- */
- dev_notice(dev,
- "necessary clock is missing. default clock rates are used.\n");
- denali->clk_rate = 50000000;
- denali->clk_x_rate = 200000000;
- }
+ denali->clk_rate = clk_get_rate(dt->clk);
+ denali->clk_x_rate = clk_get_rate(dt->clk_x);
ret = denali_init(denali);
if (ret)
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index c9149a37f8f0..6c7ca41354be 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -965,6 +965,19 @@ static const struct nand_controller_ops fsmc_nand_controller_ops = {
.setup_data_interface = fsmc_setup_data_interface,
};
+/**
+ * fsmc_nand_disable() - Disables the NAND bank
+ * @host: The instance to disable
+ */
+static void fsmc_nand_disable(struct fsmc_nand_data *host)
+{
+ u32 val;
+
+ val = readl(host->regs_va + FSMC_PC);
+ val &= ~FSMC_ENABLE;
+ writel(val, host->regs_va + FSMC_PC);
+}
+
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
@@ -1120,6 +1133,7 @@ release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
disable_clk:
+ fsmc_nand_disable(host);
clk_disable_unprepare(host->clk);
return ret;
@@ -1134,6 +1148,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
if (host) {
nand_release(&host->nand);
+ fsmc_nand_disable(host);
if (host->mode == USE_DMA_ACCESS) {
dma_release_channel(host->write_dma_chan);
@@ -1164,6 +1179,7 @@ static int fsmc_nand_resume(struct device *dev)
clk_prepare_enable(host->clk);
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
+ nand_reset(&host->nand, 0);
}
return 0;
diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/jz4780_bch.c
index 7201827809e9..c5f74ed85862 100644
--- a/drivers/mtd/nand/raw/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/jz4780_bch.c
@@ -281,12 +281,15 @@ static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
struct jz4780_bch *bch;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- get_device(&pdev->dev);
-
bch = platform_get_drvdata(pdev);
+ if (!bch) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
clk_prepare_enable(bch->clk);
return bch;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 84283c6bb0ff..f38e5c1b87e4 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2550,9 +2550,8 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
}
/* Alloc the nand chip structure */
- marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
- (nsels *
- sizeof(struct marvell_nand_chip_sel)),
+ marvell_nand = devm_kzalloc(dev,
+ struct_size(marvell_nand, sels, nsels),
GFP_KERNEL);
if (!marvell_nand) {
dev_err(dev, "could not allocate chip structure\n");
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
new file mode 100644
index 000000000000..3e8aa71407b5
--- /dev/null
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -0,0 +1,1464 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Amlogic Meson Nand Flash Controller Driver
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Liang Yang <liang.yang@amlogic.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched/task_stack.h>
+
+#define NFC_REG_CMD 0x00
+#define NFC_CMD_IDLE (0xc << 14)
+#define NFC_CMD_CLE (0x5 << 14)
+#define NFC_CMD_ALE (0x6 << 14)
+#define NFC_CMD_ADL ((0 << 16) | (3 << 20))
+#define NFC_CMD_ADH ((1 << 16) | (3 << 20))
+#define NFC_CMD_AIL ((2 << 16) | (3 << 20))
+#define NFC_CMD_AIH ((3 << 16) | (3 << 20))
+#define NFC_CMD_SEED ((8 << 16) | (3 << 20))
+#define NFC_CMD_M2N ((0 << 17) | (2 << 20))
+#define NFC_CMD_N2M ((1 << 17) | (2 << 20))
+#define NFC_CMD_RB BIT(20)
+#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
+#define NFC_CMD_SCRAMBLER_DISABLE 0
+#define NFC_CMD_SHORTMODE_DISABLE 0
+#define NFC_CMD_RB_INT BIT(14)
+
+#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
+
+#define NFC_REG_CFG 0x04
+#define NFC_REG_DADR 0x08
+#define NFC_REG_IADR 0x0c
+#define NFC_REG_BUF 0x10
+#define NFC_REG_INFO 0x14
+#define NFC_REG_DC 0x18
+#define NFC_REG_ADR 0x1c
+#define NFC_REG_DL 0x20
+#define NFC_REG_DH 0x24
+#define NFC_REG_CADR 0x28
+#define NFC_REG_SADR 0x2c
+#define NFC_REG_PINS 0x30
+#define NFC_REG_VER 0x38
+
+#define NFC_RB_IRQ_EN BIT(21)
+
+#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
+ ( \
+ (cmd_dir) | \
+ ((ran) << 19) | \
+ ((bch) << 14) | \
+ ((short_mode) << 13) | \
+ (((page_size) & 0x7f) << 6) | \
+ ((pages) & 0x3f) \
+ )
+
+#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff))
+#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff))
+#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff))
+#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
+
+#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
+
+#define ECC_CHECK_RETURN_FF (-1)
+
+#define NAND_CE0 (0xe << 10)
+#define NAND_CE1 (0xd << 10)
+
+#define DMA_BUSY_TIMEOUT 0x100000
+#define CMD_FIFO_EMPTY_TIMEOUT 1000
+
+#define MAX_CE_NUM 2
+
+/* eMMC clock register, misc control */
+#define CLK_SELECT_NAND BIT(31)
+
+#define NFC_CLK_CYCLE 6
+
+/* NAND flash controller default delay: 3 ns (value in ps, like the SDR timings) */
+#define NFC_DEFAULT_DELAY 3000
+
+#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff)
+#define MAX_CYCLE_ADDRS 5
+#define DIRREAD 1
+#define DIRWRITE 0
+
+#define ECC_PARITY_BCH8_512B 14
+#define ECC_COMPLETE BIT(31)
+#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0))
+#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0))
+#define ECC_UNCORRECTABLE 0x3f
+
+#define PER_INFO_BYTE 8
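+
+/*
+ * Informal sketch of one 8-byte per-step info word, as inferred from the
+ * ECC_* macros above and meson_nfc_{set,get}_user_byte() below:
+ * bits [15:0] the two user/OOB bytes of the ECC step,
+ * bits [21:16] number of zero bits seen (ECC_ZERO_CNT),
+ * bits [29:24] corrected bitflips, 0x3f meaning uncorrectable (ECC_ERR_CNT),
+ * bit 31 set by the DMA engine once the step has been processed.
+ */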
+
+struct meson_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ unsigned long clk_rate;
+ unsigned long level1_divider;
+ u32 bus_timing;
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+
+ u32 bch_mode;
+ u8 *data_buf;
+ __le64 *info_buf;
+ u32 nsels;
+ u8 sels[0];
+};
+
+struct meson_nand_ecc {
+ u32 bch;
+ u32 strength;
+};
+
+struct meson_nfc_data {
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+struct meson_nfc_param {
+ u32 chip_select;
+ u32 rb_select;
+};
+
+struct nand_rw_cmd {
+ u32 cmd0;
+ u32 addrs[MAX_CYCLE_ADDRS];
+ u32 cmd1;
+};
+
+struct nand_timing {
+ u32 twb;
+ u32 tadl;
+ u32 tbers_max;
+};
+
+struct meson_nfc {
+ struct nand_controller controller;
+ struct clk *core_clk;
+ struct clk *device_clk;
+ struct clk *phase_tx;
+ struct clk *phase_rx;
+
+ unsigned long clk_rate;
+ u32 bus_timing;
+
+ struct device *dev;
+ void __iomem *reg_base;
+ struct regmap *reg_clk;
+ struct completion completion;
+ struct list_head chips;
+ const struct meson_nfc_data *data;
+ struct meson_nfc_param param;
+ struct nand_timing timing;
+ union {
+ int cmd[32];
+ struct nand_rw_cmd rw;
+ } cmdfifo;
+
+ dma_addr_t daddr;
+ dma_addr_t iaddr;
+
+ unsigned long assigned_cs;
+};
+
+enum {
+ NFC_ECC_BCH8_1K = 2,
+ NFC_ECC_BCH24_1K,
+ NFC_ECC_BCH30_1K,
+ NFC_ECC_BCH40_1K,
+ NFC_ECC_BCH50_1K,
+ NFC_ECC_BCH60_1K,
+};
+
+#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)}
+
+static struct meson_nand_ecc meson_ecc[] = {
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60),
+};
+
+static int meson_nand_calc_ecc_bytes(int step_size, int strength)
+{
+ int ecc_bytes;
+
+ if (step_size == 512 && strength == 8)
+ return ECC_PARITY_BCH8_512B;
+
+ ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8);
+ ecc_bytes = ALIGN(ecc_bytes, 2);
+
+ return ecc_bytes;
+}
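+
+/*
+ * Example (informal): for a 1024B step at strength 8, fls(8192) = 14 bits per
+ * symbol, so 8 * 14 = 112 parity bits, i.e. DIV_ROUND_UP(112, 8) = 14 bytes,
+ * which is already 2-byte aligned; the 512B/strength-8 case is special-cased
+ * above to the same 14-byte value.
+ */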
+
+NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
+NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps,
+ meson_nand_calc_ecc_bytes, 1024, 8);
+
+static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct meson_nfc_nand_chip, nand);
+}
+
+static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret, value;
+
+ if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels))
+ return;
+
+ nfc->param.chip_select = meson_chip->sels[chip] ? NAND_CE1 : NAND_CE0;
+ nfc->param.rb_select = nfc->param.chip_select;
+ nfc->timing.twb = meson_chip->twb;
+ nfc->timing.tadl = meson_chip->tadl;
+ nfc->timing.tbers_max = meson_chip->tbers_max;
+
+ if (nfc->clk_rate != meson_chip->clk_rate) {
+ ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate);
+ if (ret) {
+ dev_err(nfc->dev, "failed to set clock rate\n");
+ return;
+ }
+ nfc->clk_rate = meson_chip->clk_rate;
+ }
+ if (nfc->bus_timing != meson_chip->bus_timing) {
+ value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5);
+ writel(value, nfc->reg_base + NFC_REG_CFG);
+ writel((1 << 31), nfc->reg_base + NFC_REG_CMD);
+ nfc->bus_timing = meson_chip->bus_timing;
+ }
+}
+
+static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time)
+{
+ writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
+{
+ writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)),
+ nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+ int scrambler)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 bch = meson_chip->bch_mode, cmd;
+ int len = mtd->writesize, pagesize, pages;
+
+ pagesize = nand->ecc.size;
+
+ if (raw) {
+ len = mtd->writesize + mtd->oobsize;
+ cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ return;
+ }
+
+ pages = len / nand->ecc.size;
+
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
+ NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+}
+
+static void meson_nfc_drain_cmd(struct meson_nfc *nfc)
+{
+ /*
+ * Insert two commands to make sure all valid commands are finished.
+ *
+ * The NAND flash controller is designed as a two-stage pipeline -
+ * a) fetch and b) execute.
+ * There might be cases when the driver sees that the command queue is
+ * empty while the controller still has two commands buffered: one
+ * fetched into the NFC request queue (ready to run) and another
+ * actively executing. Pushing two "IDLE" commands guarantees that
+ * the pipeline is emptied.
+ */
+ meson_nfc_cmd_idle(nfc, 0);
+ meson_nfc_cmd_idle(nfc, 0);
+}
+
+static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc,
+ unsigned int timeout_ms)
+{
+ u32 cmd_size = 0;
+ int ret;
+
+ /* wait until the command FIFO is empty */
+ ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size,
+ !NFC_CMD_GET_SIZE(cmd_size),
+ 10, timeout_ms * 1000);
+ if (ret)
+ dev_err(nfc->dev, "wait for empty CMD FIFO time out\n");
+
+ return ret;
+}
+
+static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc)
+{
+ meson_nfc_drain_cmd(nfc);
+
+ return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT);
+}
+
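+/*
+ * data_buf holds the page contents interleaved per ECC step as
+ * [ecc.size data bytes][2 user bytes + ecc.bytes parity]; the two helpers
+ * below compute offsets into that layout (layout inferred from the offset
+ * arithmetic, not from a datasheet).
+ */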
+static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len;
+
+ len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
+
+static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int len, temp;
+
+ temp = nand->ecc.size + nand->ecc.bytes;
+ len = (temp + 2) * i;
+
+ return meson_chip->data_buf + len;
+}
+
+static void meson_nfc_get_data_oob(struct nand_chip *nand,
+ u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(buf, dsrc, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(oobbuf, osrc, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static void meson_nfc_set_data_oob(struct nand_chip *nand,
+ const u8 *buf, u8 *oobbuf)
+{
+ int i, oob_len = 0;
+ u8 *dsrc, *osrc;
+
+ oob_len = nand->ecc.bytes + 2;
+ for (i = 0; i < nand->ecc.steps; i++) {
+ if (buf) {
+ dsrc = meson_nfc_data_ptr(nand, i);
+ memcpy(dsrc, buf, nand->ecc.size);
+ buf += nand->ecc.size;
+ }
+ osrc = meson_nfc_oob_ptr(nand, i);
+ memcpy(osrc, oobbuf, oob_len);
+ oobbuf += oob_len;
+ }
+}
+
+static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms)
+{
+ u32 cmd, cfg;
+ int ret = 0;
+
+ meson_nfc_cmd_idle(nfc, nfc->timing.twb);
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ cfg |= NFC_RB_IRQ_EN;
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ init_completion(&nfc->completion);
+
+ /* use the maximum erase time as the R/B wait timeout, in controller clock cycles */
+ cmd = NFC_CMD_RB | NFC_CMD_RB_INT
+ | nfc->param.chip_select | nfc->timing.tbers_max;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ ret = wait_for_completion_timeout(&nfc->completion,
+ msecs_to_jiffies(timeout_ms));
+ if (ret == 0)
+ ret = -1;
+
+ return ret;
+}
+
+static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ *info |= oob_buf[count];
+ *info |= oob_buf[count + 1] << 8;
+ }
+}
+
+static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int i, count;
+
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ info = &meson_chip->info_buf[i];
+ oob_buf[count] = *info;
+ oob_buf[count + 1] = *info >> 8;
+ }
+}
+
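+/*
+ * Walk the per-step info words: accumulate corrected bitflips, mark fully
+ * corrected steps in @correct_bitmap, and return 0 if every step was
+ * corrected, ECC_CHECK_RETURN_FF if an uncorrectable step looks like an
+ * erased (scrambled) page, or -EBADMSG otherwise.
+ */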
+static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
+ u64 *correct_bitmap)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ int ret = 0, i;
+
+ for (i = 0; i < nand->ecc.steps; i++) {
+ info = &meson_chip->info_buf[i];
+ if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
+ mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
+ *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
+ *correct_bitmap |= BIT_ULL(i);
+ continue;
+ }
+ if ((nand->options & NAND_NEED_SCRAMBLING) &&
+ ECC_ZERO_CNT(*info) < nand->ecc.strength) {
+ mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
+ *bitflips = max_t(u32, *bitflips,
+ ECC_ZERO_CNT(*info));
+ ret = ECC_CHECK_RETURN_FF;
+ } else {
+ ret = -EBADMSG;
+ }
+ }
+ return ret;
+}
+
+static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, u8 *databuf,
+ int datalen, u8 *infobuf, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ u32 cmd;
+ int ret = 0;
+
+ nfc->daddr = dma_map_single(nfc->dev, (void *)databuf, datalen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->daddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ return ret;
+ }
+ cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ if (infobuf) {
+ nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir);
+ ret = dma_mapping_error(nfc->dev, nfc->iaddr);
+ if (ret) {
+ dev_err(nfc->dev, "DMA mapping error\n");
+ dma_unmap_single(nfc->dev,
+ nfc->daddr, datalen, dir);
+ return ret;
+ }
+ cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr);
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+
+ return ret;
+}
+
+static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+ int datalen, int infolen,
+ enum dma_data_direction dir)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
+ dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+ if (infolen)
+ dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+}
+
+static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+ u8 *info;
+
+ info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
+ PER_INFO_BYTE, DMA_FROM_DEVICE);
+ if (ret) {
+ kfree(info);
+ return ret;
+ }
+
+ cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
+ kfree(info);
+
+ return ret;
+}
+
+static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int ret = 0;
+ u32 cmd;
+
+ ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL,
+ 0, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+
+ meson_nfc_drain_cmd(nfc);
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ meson_nfc_dma_buffer_release(nand, len, 0, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
+ int page, bool in)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&nand->data_interface);
+ u32 *addrs = nfc->cmdfifo.rw.addrs;
+ u32 cs = nfc->param.chip_select;
+ u32 cmd0, cmd_num, row_start;
+ int ret = 0, i;
+
+ cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int);
+
+ cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN;
+ nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0;
+
+ addrs[0] = cs | NFC_CMD_ALE | 0;
+ if (mtd->writesize <= 512) {
+ cmd_num--;
+ row_start = 1;
+ } else {
+ addrs[1] = cs | NFC_CMD_ALE | 0;
+ row_start = 2;
+ }
+
+ addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0);
+ addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1);
+
+ if (nand->options & NAND_ROW_ADDR_3)
+ addrs[row_start + 2] =
+ cs | NFC_CMD_ALE | ROW_ADDER(page, 2);
+ else
+ cmd_num--;
+
+ /* subtract cmd1 */
+ cmd_num--;
+
+ for (i = 0; i < cmd_num; i++)
+ writel_relaxed(nfc->cmdfifo.cmd[i],
+ nfc->reg_base + NFC_REG_CMD);
+
+ if (in) {
+ nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
+ writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max));
+ } else {
+ meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
+ }
+
+ return ret;
+}
+
+static int meson_nfc_write_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&nand->data_interface);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ int data_len, info_len;
+ u32 cmd;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, (u8 *)meson_chip->info_buf,
+ info_len, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRWRITE,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max));
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+
+ meson_nfc_set_data_oob(nand, buf, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 1);
+}
+
+static int meson_nfc_write_page_hwecc(struct nand_chip *nand,
+ const u8 *buf, int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u8 *oob_buf = nand->oob_poi;
+
+ memcpy(meson_chip->data_buf, buf, mtd->writesize);
+ memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE);
+ meson_nfc_set_user_byte(nand, oob_buf);
+
+ return meson_nfc_write_page_sub(nand, page, 0);
+}
+
+static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
+ struct nand_chip *nand, int raw)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ __le64 *info;
+ u32 neccpages;
+ int ret;
+
+ neccpages = raw ? 1 : nand->ecc.steps;
+ info = &meson_chip->info_buf[neccpages - 1];
+ do {
+ usleep_range(10, 15);
+ /* info is updated by the NFC DMA engine */
+ smp_rmb();
+ ret = *info & ECC_COMPLETE;
+ } while (!ret);
+}
+
+static int meson_nfc_read_page_sub(struct nand_chip *nand,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int data_len, info_len;
+ int ret;
+
+ meson_nfc_select_chip(nand, nand->cur_cs);
+
+ data_len = mtd->writesize + mtd->oobsize;
+ info_len = nand->ecc.steps * PER_INFO_BYTE;
+
+ ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD);
+ if (ret)
+ return ret;
+
+ ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
+ data_len, (u8 *)meson_chip->info_buf,
+ info_len, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ meson_nfc_cmd_seed(nfc, page);
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_ENABLE);
+ } else {
+ meson_nfc_cmd_access(nand, raw, DIRREAD,
+ NFC_CMD_SCRAMBLER_DISABLE);
+ }
+
+ ret = meson_nfc_wait_dma_finish(nfc);
+ meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
+
+ meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ u8 *oob_buf = nand->oob_poi;
+ int ret;
+
+ ret = meson_nfc_read_page_sub(nand, page, 1);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_data_oob(nand, buf, oob_buf);
+
+ return 0;
+}
+
+static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u64 correct_bitmap = 0;
+ u32 bitflips = 0;
+ u8 *oob_buf = nand->oob_poi;
+ int ret, i;
+
+ ret = meson_nfc_read_page_sub(nand, page, 0);
+ if (ret)
+ return ret;
+
+ meson_nfc_get_user_byte(nand, oob_buf);
+ ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap);
+ if (ret == ECC_CHECK_RETURN_FF) {
+ if (buf)
+ memset(buf, 0xff, mtd->writesize);
+ memset(oob_buf, 0xff, mtd->oobsize);
+ } else if (ret < 0) {
+ if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) {
+ mtd->ecc_stats.failed++;
+ return bitflips;
+ }
+ ret = meson_nfc_read_page_raw(nand, buf, 0, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nand->ecc.steps ; i++) {
+ u8 *data = buf + i * ecc->size;
+ u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
+
+ if (correct_bitmap & BIT_ULL(i))
+ continue;
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 2,
+ NULL, 0,
+ ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ bitflips = max_t(u32, bitflips, ret);
+ }
+ }
+ } else if (buf && buf != meson_chip->data_buf) {
+ memcpy(buf, meson_chip->data_buf, mtd->writesize);
+ }
+
+ return bitflips;
+}
+
+static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_raw(nand, NULL, 1, page);
+}
+
+static int meson_nfc_read_oob(struct nand_chip *nand, int page)
+{
+ return meson_nfc_read_page_hwecc(nand, NULL, 1, page);
+}
+
+static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
+{
+ if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
+ return true;
+ return false;
+}
+
+static void *
+meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in))
+ return instr->ctx.data.buf.in;
+
+ return kzalloc(instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr,
+ void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf == instr->ctx.data.buf.in)
+ return;
+
+ memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len);
+ kfree(buf);
+}
+
+static void *
+meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR))
+ return NULL;
+
+ if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out))
+ return (void *)instr->ctx.data.buf.out;
+
+ return kmemdup(instr->ctx.data.buf.out,
+ instr->ctx.data.len, GFP_KERNEL);
+}
+
+static void
+meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr,
+ const void *buf)
+{
+ if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) ||
+ WARN_ON(!buf))
+ return;
+
+ if (buf != instr->ctx.data.buf.out)
+ kfree(buf);
+}
+
+static int meson_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ const struct nand_op_instr *instr = NULL;
+ void *buf;
+ u32 op_id, delay_idle, cmd;
+ int i;
+
+ meson_nfc_select_chip(nand, op->cs);
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+ delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns),
+ meson_chip->level1_divider *
+ NFC_CLK_CYCLE);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = nfc->param.chip_select | NFC_CMD_CLE;
+ cmd |= instr->ctx.cmd.opcode & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ cmd = nfc->param.chip_select | NFC_CMD_ALE;
+ cmd |= instr->ctx.addr.addrs[i] & 0xff;
+ writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ }
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ buf = meson_nand_op_get_dma_safe_input_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_read_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_input_buf(instr, buf);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ buf = meson_nand_op_get_dma_safe_output_buf(instr);
+ if (!buf)
+ return -ENOMEM;
+ meson_nfc_write_buf(nand, buf, instr->ctx.data.len);
+ meson_nand_op_put_dma_safe_output_buf(instr, buf);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms);
+ if (instr->delay_ns)
+ meson_nfc_cmd_idle(nfc, delay_idle);
+ break;
+ }
+ }
+ meson_nfc_wait_cmd_finish(nfc, 1000);
+ return 0;
+}
+
+static int meson_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = 2 + (section * (2 + nand->ecc.bytes));
+ oobregion->length = nand->ecc.bytes;
+
+ return 0;
+}
+
+static int meson_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (2 + nand->ecc.bytes);
+ oobregion->length = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
+ .ecc = meson_ooblayout_ecc,
+ .free = meson_ooblayout_free,
+};
+
+static int meson_nfc_clk_init(struct meson_nfc *nfc)
+{
+ int ret;
+
+ /* request core clock */
+ nfc->core_clk = devm_clk_get(nfc->dev, "core");
+ if (IS_ERR(nfc->core_clk)) {
+ dev_err(nfc->dev, "failed to get core clock\n");
+ return PTR_ERR(nfc->core_clk);
+ }
+
+ nfc->device_clk = devm_clk_get(nfc->dev, "device");
+ if (IS_ERR(nfc->device_clk)) {
+ dev_err(nfc->dev, "failed to get device clock\n");
+ return PTR_ERR(nfc->device_clk);
+ }
+
+ nfc->phase_tx = devm_clk_get(nfc->dev, "tx");
+ if (IS_ERR(nfc->phase_tx)) {
+ dev_err(nfc->dev, "failed to get TX clk\n");
+ return PTR_ERR(nfc->phase_tx);
+ }
+
+ nfc->phase_rx = devm_clk_get(nfc->dev, "rx");
+ if (IS_ERR(nfc->phase_rx)) {
+ dev_err(nfc->dev, "failed to get RX clk\n");
+ return PTR_ERR(nfc->phase_rx);
+ }
+
+ /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
+ regmap_update_bits(nfc->reg_clk,
+ 0, CLK_SELECT_NAND, CLK_SELECT_NAND);
+
+ ret = clk_prepare_enable(nfc->core_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->device_clk);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable device clock\n");
+ goto err_device_clk;
+ }
+
+ ret = clk_prepare_enable(nfc->phase_tx);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable TX clock\n");
+ goto err_phase_tx;
+ }
+
+ ret = clk_prepare_enable(nfc->phase_rx);
+ if (ret) {
+ dev_err(nfc->dev, "failed to enable RX clock\n");
+ goto err_phase_rx;
+ }
+
+ ret = clk_set_rate(nfc->device_clk, 24000000);
+ if (ret)
+ goto err_phase_rx;
+
+ return 0;
+err_phase_rx:
+ clk_disable_unprepare(nfc->phase_tx);
+err_phase_tx:
+ clk_disable_unprepare(nfc->device_clk);
+err_device_clk:
+ clk_disable_unprepare(nfc->core_clk);
+ return ret;
+}
+
+static void meson_nfc_disable_clk(struct meson_nfc *nfc)
+{
+ clk_disable_unprepare(nfc->phase_rx);
+ clk_disable_unprepare(nfc->phase_tx);
+ clk_disable_unprepare(nfc->device_clk);
+ clk_disable_unprepare(nfc->core_clk);
+}
+
+static void meson_nfc_free_buffer(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+
+ kfree(meson_chip->info_buf);
+ kfree(meson_chip->data_buf);
+}
+
+static int meson_chip_buffer_init(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ u32 page_bytes, info_bytes, nsectors;
+
+ nsectors = mtd->writesize / nand->ecc.size;
+
+ page_bytes = mtd->writesize + mtd->oobsize;
+ info_bytes = nsectors * PER_INFO_BYTE;
+
+ meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL);
+ if (!meson_chip->data_buf)
+ return -ENOMEM;
+
+ meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL);
+ if (!meson_chip->info_buf) {
+ kfree(meson_chip->data_buf);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static
+int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ const struct nand_sdr_timings *timings;
+ u32 div, bt_min, bt_max, tbers_clocks;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE);
+ bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div;
+ bt_max = (NFC_DEFAULT_DELAY + timings->tRHOH_min +
+ timings->tRC_min / 2) / div;
+
+ meson_chip->twb = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min),
+ div * NFC_CLK_CYCLE);
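+ /*
+ * meson_nfc_queue_rb() programs tbers_max as a power-of-two cycle
+ * count, so store ceil(log2(tBERS in controller cycles)) here via the
+ * ilog2-plus-round-up below; this reading is inferred from the code.
+ */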
+ tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max),
+ div * NFC_CLK_CYCLE);
+ meson_chip->tbers_max = ilog2(tbers_clocks);
+ if (!is_power_of_2(tbers_clocks))
+ meson_chip->tbers_max++;
+
+ bt_min = DIV_ROUND_UP(bt_min, 1000);
+ bt_max = DIV_ROUND_UP(bt_max, 1000);
+
+ if (bt_max < bt_min)
+ return -EINVAL;
+
+ meson_chip->level1_divider = div;
+ meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider;
+ meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1;
+
+ return 0;
+}
+
+static int meson_nand_bch_mode(struct nand_chip *nand)
+{
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ int i;
+
+ if (nand->ecc.strength > 60 || nand->ecc.strength < 8)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
+ if (meson_ecc[i].strength == nand->ecc.strength) {
+ meson_chip->bch_mode = meson_ecc[i].bch;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void meson_nand_detach_chip(struct nand_chip *nand)
+{
+ meson_nfc_free_buffer(nand);
+}
+
+static int meson_nand_attach_chip(struct nand_chip *nand)
+{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+ struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ int nsectors = mtd->writesize / 1024;
+ int ret;
+
+ if (!mtd->name) {
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand%d",
+ dev_name(nfc->dev),
+ meson_chip->sels[0]);
+ if (!mtd->name)
+ return -ENOMEM;
+ }
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
+ mtd->oobsize - 2 * nsectors);
+ if (ret) {
+ dev_err(nfc->dev, "failed to ECC init\n");
+ return -EINVAL;
+ }
+
+ ret = meson_nand_bch_mode(nand);
+ if (ret)
+ return -EINVAL;
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.write_page_raw = meson_nfc_write_page_raw;
+ nand->ecc.write_page = meson_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = nand_write_oob_std;
+ nand->ecc.write_oob = nand_write_oob_std;
+
+ nand->ecc.read_page_raw = meson_nfc_read_page_raw;
+ nand->ecc.read_page = meson_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = meson_nfc_read_oob_raw;
+ nand->ecc.read_oob = meson_nfc_read_oob;
+
+ if (nand->options & NAND_BUSWIDTH_16) {
+ dev_err(nfc->dev, "16bits bus width not supported");
+ return -EINVAL;
+ }
+ ret = meson_chip_buffer_init(nand);
+ if (ret)
+ return -ENOMEM;
+
+ return ret;
+}
+
+static const struct nand_controller_ops meson_nand_controller_ops = {
+ .attach_chip = meson_nand_attach_chip,
+ .detach_chip = meson_nand_detach_chip,
+ .setup_data_interface = meson_nfc_setup_data_interface,
+ .exec_op = meson_nfc_exec_op,
+};
+
+static int
+meson_nfc_nand_chip_init(struct device *dev,
+ struct meson_nfc *nfc, struct device_node *np)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret, i;
+ u32 tmp, nsels;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > MAX_CE_NUM) {
+ dev_err(dev, "invalid register property size\n");
+ return -EINVAL;
+ }
+
+ meson_chip = devm_kzalloc(dev,
+ sizeof(*meson_chip) + (nsels * sizeof(u8)),
+ GFP_KERNEL);
+ if (!meson_chip)
+ return -ENOMEM;
+
+ meson_chip->nsels = nsels;
+
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "could not retrieve register property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+ }
+
+ nand = &meson_chip->nand;
+ nand->controller = &nfc->controller;
+ nand->controller->ops = &meson_nand_controller_ops;
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USE_BOUNCE_BUFFER;
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ ret = nand_scan(nand, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register MTD device: %d\n", ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ list_add_tail(&meson_chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
+{
+ struct meson_nfc_nand_chip *meson_chip;
+ struct mtd_info *mtd;
+ int ret;
+
+ while (!list_empty(&nfc->chips)) {
+ meson_chip = list_first_entry(&nfc->chips,
+ struct meson_nfc_nand_chip, node);
+ mtd = nand_to_mtd(&meson_chip->nand);
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ meson_nfc_free_buffer(&meson_chip->nand);
+ nand_cleanup(&meson_chip->nand);
+ list_del(&meson_chip->node);
+ }
+
+ return 0;
+}
+
+static int meson_nfc_nand_chips_init(struct device *dev,
+ struct meson_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ meson_nfc_nand_chip_cleanup(nfc);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t meson_nfc_irq(int irq, void *id)
+{
+ struct meson_nfc *nfc = id;
+ u32 cfg;
+
+ cfg = readl(nfc->reg_base + NFC_REG_CFG);
+ if (!(cfg & NFC_RB_IRQ_EN))
+ return IRQ_NONE;
+
+ cfg &= ~(NFC_RB_IRQ_EN);
+ writel(cfg, nfc->reg_base + NFC_REG_CFG);
+
+ complete(&nfc->completion);
+ return IRQ_HANDLED;
+}
+
+static const struct meson_nfc_data meson_gxl_data = {
+ .ecc_caps = &meson_gxl_ecc_caps,
+};
+
+static const struct meson_nfc_data meson_axg_data = {
+ .ecc_caps = &meson_axg_ecc_caps,
+};
+
+static const struct of_device_id meson_nfc_id_table[] = {
+ {
+ .compatible = "amlogic,meson-gxl-nfc",
+ .data = &meson_gxl_data,
+ }, {
+ .compatible = "amlogic,meson-axg-nfc",
+ .data = &meson_axg_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, meson_nfc_id_table);
+
+static int meson_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_nfc *nfc;
+ struct resource *res;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->data = of_device_get_match_data(&pdev->dev);
+ if (!nfc->data)
+ return -ENODEV;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->reg_base))
+ return PTR_ERR(nfc->reg_base);
+
+ nfc->reg_clk =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,mmc-syscon");
+ if (IS_ERR(nfc->reg_clk)) {
+ dev_err(dev, "Failed to lookup clock base\n");
+ return PTR_ERR(nfc->reg_clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no NFC IRQ resource\n");
+ return -EINVAL;
+ }
+
+ ret = meson_nfc_clk_init(nfc);
+ if (ret) {
+ dev_err(dev, "failed to initialize NAND clock\n");
+ return ret;
+ }
+
+ writel(0, nfc->reg_base + NFC_REG_CFG);
+ ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc);
+ if (ret) {
+ dev_err(dev, "failed to request NFC IRQ\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = meson_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init NAND chips\n");
+ goto err_clk;
+ }
+
+ return 0;
+err_clk:
+ meson_nfc_disable_clk(nfc);
+ return ret;
+}
+
+static int meson_nfc_remove(struct platform_device *pdev)
+{
+ struct meson_nfc *nfc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = meson_nfc_nand_chip_cleanup(nfc);
+ if (ret)
+ return ret;
+
+ meson_nfc_disable_clk(nfc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver meson_nfc_driver = {
+ .probe = meson_nfc_probe,
+ .remove = meson_nfc_remove,
+ .driver = {
+ .name = "meson-nand",
+ .of_match_table = meson_nfc_id_table,
+ },
+};
+module_platform_driver(meson_nfc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Liang Yang <liang.yang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver");
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 6432bd70c3b3..05b0c19d72d9 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -267,11 +267,15 @@ static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
struct mtk_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- get_device(&pdev->dev);
ecc = platform_get_drvdata(pdev);
+ if (!ecc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
clk_prepare_enable(ecc->clk);
mtk_ecc_hw_init(ecc);
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b6b4602f5132..2c0e09187773 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1451,8 +1451,7 @@ static int mtk_nfc_probe(struct platform_device *pdev)
if (!nfc)
return -ENOMEM;
- spin_lock_init(&nfc->controller.lock);
- init_waitqueue_head(&nfc->controller.wq);
+ nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
nfc->controller.ops = &mtk_nfc_controller_ops;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 839494ac457c..ddd396e93e32 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -278,11 +278,8 @@ EXPORT_SYMBOL_GPL(nand_deselect_target);
static void nand_release_device(struct nand_chip *chip)
{
/* Release the controller and the chip */
- spin_lock(&chip->controller->lock);
- chip->controller->active = NULL;
- chip->state = FL_READY;
- wake_up(&chip->controller->wq);
- spin_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->lock);
}
/**
@@ -331,57 +328,23 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
}
/**
- * panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @new_state: the state which is requested
- *
- * Used when in panic, no locks are taken.
- */
-static void panic_nand_get_device(struct nand_chip *chip, int new_state)
-{
- /* Hardware controller shared among independent devices */
- chip->controller->active = chip;
- chip->state = new_state;
-}
-
-/**
* nand_get_device - [GENERIC] Get chip for selected access
* @chip: NAND chip structure
- * @new_state: the state which is requested
*
- * Get the device and lock it for exclusive access
+ * Lock the device and its controller for exclusive access
+ *
+ * Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
-static int
-nand_get_device(struct nand_chip *chip, int new_state)
+static int nand_get_device(struct nand_chip *chip)
{
- spinlock_t *lock = &chip->controller->lock;
- wait_queue_head_t *wq = &chip->controller->wq;
- DECLARE_WAITQUEUE(wait, current);
-retry:
- spin_lock(lock);
-
- /* Hardware controller shared among independent devices */
- if (!chip->controller->active)
- chip->controller->active = chip;
-
- if (chip->controller->active == chip && chip->state == FL_READY) {
- chip->state = new_state;
- spin_unlock(lock);
- return 0;
- }
- if (new_state == FL_PM_SUSPENDED) {
- if (chip->controller->active->state == FL_PM_SUSPENDED) {
- chip->state = FL_PM_SUSPENDED;
- spin_unlock(lock);
- return 0;
- }
+ mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ mutex_unlock(&chip->lock);
+ return -EBUSY;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(wq, &wait);
- spin_unlock(lock);
- schedule();
- remove_wait_queue(wq, &wait);
- goto retry;
+ mutex_lock(&chip->controller->lock);
+
+ return 0;
}
/**
@@ -458,7 +421,7 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int chipnr, page, status, len;
+ int chipnr, page, status, len, ret;
pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
@@ -480,7 +443,9 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- nand_reset(chip, chipnr);
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
nand_select_target(chip, chipnr);
@@ -603,7 +568,10 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- nand_get_device(chip, FL_WRITING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
ret = nand_markbad_bbm(chip, ofs);
nand_release_device(chip);
}
@@ -3581,7 +3549,9 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
- nand_get_device(chip, FL_READING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
@@ -4100,9 +4070,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
struct mtd_oob_ops ops;
int ret;
- /* Grab the device */
- panic_nand_get_device(chip, FL_WRITING);
-
nand_select_target(chip, chipnr);
/* Wait for the device to get ready */
@@ -4133,7 +4100,9 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- nand_get_device(chip, FL_WRITING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -4156,23 +4125,6 @@ out:
}
/**
- * single_erase - [GENERIC] NAND standard block erase command function
- * @chip: NAND chip object
- * @page: the page address of the block which will be erased
- *
- * Standard erase command for NAND chips. Returns NAND status.
- */
-static int single_erase(struct nand_chip *chip, int page)
-{
- unsigned int eraseblock;
-
- /* Send commands to erase a block */
- eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
-
- return nand_erase_op(chip, eraseblock);
-}
-
-/**
* nand_erase - [MTD Interface] erase block(s)
* @mtd: MTD device structure
* @instr: erase instruction
@@ -4195,7 +4147,7 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt)
{
- int page, status, pages_per_block, ret, chipnr;
+ int page, pages_per_block, ret, chipnr;
loff_t len;
pr_debug("%s: start = 0x%012llx, len = %llu\n",
@@ -4206,7 +4158,9 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(chip, FL_ERASING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -4247,17 +4201,11 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
(page + pages_per_block))
chip->pagebuf = -1;
- if (chip->legacy.erase)
- status = chip->legacy.erase(chip,
- page & chip->pagemask);
- else
- status = single_erase(chip, page & chip->pagemask);
-
- /* See if block erase succeeded */
- if (status) {
+ ret = nand_erase_op(chip, (page & chip->pagemask) >>
+ (chip->phys_erase_shift - chip->page_shift));
+ if (ret) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
- ret = -EIO;
instr->fail_addr =
((loff_t)page << chip->page_shift);
goto erase_exit;
@@ -4299,7 +4247,7 @@ static void nand_sync(struct mtd_info *mtd)
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(chip, FL_SYNCING);
+ WARN_ON(nand_get_device(chip));
/* Release it and go back */
nand_release_device(chip);
}
@@ -4316,7 +4264,10 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
int ret;
/* Select the NAND device */
- nand_get_device(chip, FL_READING);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
nand_select_target(chip, chipnr);
ret = nand_block_checkbad(chip, offs, 0);
@@ -4389,7 +4340,13 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
*/
static int nand_suspend(struct mtd_info *mtd)
{
- return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ mutex_lock(&chip->lock);
+ chip->suspended = 1;
+ mutex_unlock(&chip->lock);
+
+ return 0;
}
/**
@@ -4400,11 +4357,13 @@ static void nand_resume(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if (chip->state == FL_PM_SUSPENDED)
- nand_release_device(chip);
+ mutex_lock(&chip->lock);
+ if (chip->suspended)
+ chip->suspended = 0;
else
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
+ mutex_unlock(&chip->lock);
}
/**
@@ -4414,7 +4373,7 @@ static void nand_resume(struct mtd_info *mtd)
*/
static void nand_shutdown(struct mtd_info *mtd)
{
- nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
+ nand_suspend(mtd);
}
/* Set default functions */
@@ -5019,6 +4978,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
/* Assume all dies are deselected when we enter nand_scan_ident(). */
chip->cur_cs = -1;
+ mutex_init(&chip->lock);
+
/* Enforce the right timings for reset/detection */
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
@@ -5061,11 +5022,15 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
u8 id[2];
/* See comment in nand_get_flash_type for reset */
- nand_reset(chip, i);
+ ret = nand_reset(chip, i);
+ if (ret)
+ break;
nand_select_target(chip, i);
/* Send the command for reading device ID */
- nand_readid_op(chip, 0, id, sizeof(id));
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ break;
/* Read manufacturer and device IDs */
if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
nand_deselect_target(chip);
@@ -5556,6 +5521,7 @@ static int nand_scan_tail(struct nand_chip *chip)
}
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc_oob_first;
+ /* fall through */
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
@@ -5575,6 +5541,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->read_subpage = nand_read_subpage;
if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
ecc->write_subpage = nand_write_subpage_hwecc;
+ /* fall through */
case NAND_ECC_HW_SYNDROME:
if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
@@ -5612,6 +5579,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->size, mtd->writesize);
ecc->mode = NAND_ECC_SOFT;
ecc->algo = NAND_ECC_HAMMING;
+ /* fall through */
case NAND_ECC_SOFT:
ret = nand_set_ecc_soft_ops(chip);
@@ -5718,9 +5686,6 @@ static int nand_scan_tail(struct nand_chip *chip)
}
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
- /* Initialize state */
- chip->state = FL_READY;
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index 43575943f13b..f2526ec616a6 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -331,6 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
*/
if (column == -1 && page_addr == -1)
return;
+ /* fall through */
default:
/*
@@ -483,7 +484,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
- /* This applies to read commands */
+ /* fall through - This applies to read commands */
default:
/*
* If we don't have access to the busy pin, we apply the given
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 68e8b9f7f372..8f280a2962c8 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -994,12 +994,9 @@ static int omap_wait(struct nand_chip *this)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
unsigned long timeo = jiffies;
- int status, state = this->state;
+ int status;
- if (state == FL_ERASING)
- timeo += msecs_to_jiffies(400);
- else
- timeo += msecs_to_jiffies(20);
+ timeo += msecs_to_jiffies(400);
writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
@@ -2173,11 +2170,8 @@ static const struct nand_controller_ops omap_nand_controller_ops = {
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
-static struct nand_controller omap_gpmc_controller = {
- .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
- .ops = &omap_nand_controller_ops,
-};
+static struct nand_controller omap_gpmc_controller;
+static bool omap_gpmc_controller_initialized;
static int omap_nand_probe(struct platform_device *pdev)
{
@@ -2227,6 +2221,12 @@ static int omap_nand_probe(struct platform_device *pdev)
info->phys_base = res->start;
+ if (!omap_gpmc_controller_initialized) {
+ omap_gpmc_controller.ops = &omap_nand_controller_ops;
+ nand_controller_init(&omap_gpmc_controller);
+ omap_gpmc_controller_initialized = true;
+ }
+
nand_chip->controller = &omap_gpmc_controller;
nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index c01422d953dd..86456216fb93 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -369,8 +369,7 @@ static int r852_wait(struct nand_chip *chip)
unsigned long timeout;
u8 status;
- timeout = jiffies + (chip->state == FL_ERASING ?
- msecs_to_jiffies(400) : msecs_to_jiffies(20));
+ timeout = jiffies + msecs_to_jiffies(400);
while (time_before(jiffies, timeout))
if (chip->legacy.dev_ready(chip))
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
new file mode 100644
index 000000000000..999ca6a66036
--- /dev/null
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -0,0 +1,2073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018
+ * Author: Christophe Kerello <christophe.kerello@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Bad block marker length */
+#define FMC2_BBM_LEN 2
+
+/* ECC step size */
+#define FMC2_ECC_STEP_SIZE 512
+
+/* BCHDSRx registers length */
+#define FMC2_BCHDSRS_LEN 20
+
+/* HECCR length */
+#define FMC2_HECCR_LEN 4
+
+/* Max requests done for an 8k NAND page size */
+#define FMC2_MAX_SG 16
+
+/* Max chip enable */
+#define FMC2_MAX_CE 2
+
+/* Max ECC buffer length */
+#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+
+/* Timings */
+#define FMC2_THIZ 1
+#define FMC2_TIO 8000
+#define FMC2_TSYNC 3000
+#define FMC2_PCR_TIMING_MASK 0xf
+#define FMC2_PMEM_PATT_TIMING_MASK 0xff
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1 0x0
+#define FMC2_PCR 0x80
+#define FMC2_SR 0x84
+#define FMC2_PMEM 0x88
+#define FMC2_PATT 0x8c
+#define FMC2_HECCR 0x94
+#define FMC2_CSQCR 0x200
+#define FMC2_CSQCFGR1 0x204
+#define FMC2_CSQCFGR2 0x208
+#define FMC2_CSQCFGR3 0x20c
+#define FMC2_CSQAR1 0x210
+#define FMC2_CSQAR2 0x214
+#define FMC2_CSQIER 0x220
+#define FMC2_CSQISR 0x224
+#define FMC2_CSQICR 0x228
+#define FMC2_CSQEMSR 0x230
+#define FMC2_BCHIER 0x250
+#define FMC2_BCHISR 0x254
+#define FMC2_BCHICR 0x258
+#define FMC2_BCHPBR1 0x260
+#define FMC2_BCHPBR2 0x264
+#define FMC2_BCHPBR3 0x268
+#define FMC2_BCHPBR4 0x26c
+#define FMC2_BCHDSR0 0x27c
+#define FMC2_BCHDSR1 0x280
+#define FMC2_BCHDSR2 0x284
+#define FMC2_BCHDSR3 0x288
+#define FMC2_BCHDSR4 0x28c
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_FMC2EN BIT(31)
+
+/* Register: FMC2_PCR */
+#define FMC2_PCR_PWAITEN BIT(1)
+#define FMC2_PCR_PBKEN BIT(2)
+#define FMC2_PCR_PWID_MASK GENMASK(5, 4)
+#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4)
+#define FMC2_PCR_PWID_BUSWIDTH_8 0
+#define FMC2_PCR_PWID_BUSWIDTH_16 1
+#define FMC2_PCR_ECCEN BIT(6)
+#define FMC2_PCR_ECCALG BIT(8)
+#define FMC2_PCR_TCLR_MASK GENMASK(12, 9)
+#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9)
+#define FMC2_PCR_TCLR_DEFAULT 0xf
+#define FMC2_PCR_TAR_MASK GENMASK(16, 13)
+#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13)
+#define FMC2_PCR_TAR_DEFAULT 0xf
+#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17)
+#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17)
+#define FMC2_PCR_ECCSS_512 1
+#define FMC2_PCR_ECCSS_2048 3
+#define FMC2_PCR_BCHECC BIT(24)
+#define FMC2_PCR_WEN BIT(25)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_NWRF BIT(6)
+
+/* Register: FMC2_PMEM */
+#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0)
+#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8)
+#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16)
+#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24)
+#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_PATT */
+#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0)
+#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8)
+#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16)
+#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
+#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_CSQCR */
+#define FMC2_CSQCR_CSQSTART BIT(0)
+
+/* Register: FMC2_CSQCFGR1 */
+#define FMC2_CSQCFGR1_CMD2EN BIT(1)
+#define FMC2_CSQCFGR1_DMADEN BIT(2)
+#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4)
+#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8)
+#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR1_CMD1T BIT(24)
+#define FMC2_CSQCFGR1_CMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR2 */
+#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
+#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
+#define FMC2_CSQCFGR2_DMASEN BIT(2)
+#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8)
+#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR2_RCMD1T BIT(24)
+#define FMC2_CSQCFGR2_RCMD2T BIT(25)
+
+/* Register: FMC2_CSQCFGR3 */
+#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8)
+#define FMC2_CSQCFGR3_AC1T BIT(16)
+#define FMC2_CSQCFGR3_AC2T BIT(17)
+#define FMC2_CSQCFGR3_AC3T BIT(18)
+#define FMC2_CSQCFGR3_AC4T BIT(19)
+#define FMC2_CSQCFGR3_AC5T BIT(20)
+#define FMC2_CSQCFGR3_SDT BIT(21)
+#define FMC2_CSQCFGR3_RAC1T BIT(22)
+#define FMC2_CSQCFGR3_RAC2T BIT(23)
+
+/* Register: FMC2_CSQAR1 */
+#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0)
+#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8)
+#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24)
+
+/* Register: FMC2_CSQAR2 */
+#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0)
+#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10)
+#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16)
+
+/* Register: FMC2_CSQIER */
+#define FMC2_CSQIER_TCIE BIT(0)
+
+/* Register: FMC2_CSQICR */
+#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_CSQEMSR */
+#define FMC2_CSQEMSR_SEM GENMASK(15, 0)
+
+/* Register: FMC2_BCHIER */
+#define FMC2_BCHIER_DERIE BIT(1)
+#define FMC2_BCHIER_EPBRIE BIT(4)
+
+/* Register: FMC2_BCHICR */
+#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_BCHDSR0 */
+#define FMC2_BCHDSR0_DUE BIT(0)
+#define FMC2_BCHDSR0_DEF BIT(1)
+#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4)
+#define FMC2_BCHDSR0_DEN_SHIFT 4
+
+/* Register: FMC2_BCHDSR1 */
+#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR1_EBP2_SHIFT 16
+
+/* Register: FMC2_BCHDSR2 */
+#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR2_EBP4_SHIFT 16
+
+/* Register: FMC2_BCHDSR3 */
+#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR3_EBP6_SHIFT 16
+
+/* Register: FMC2_BCHDSR4 */
+#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16)
+#define FMC2_BCHDSR4_EBP8_SHIFT 16
+
+enum stm32_fmc2_ecc {
+ FMC2_ECC_HAM = 1,
+ FMC2_ECC_BCH4 = 4,
+ FMC2_ECC_BCH8 = 8
+};
+
+enum stm32_fmc2_irq_state {
+ FMC2_IRQ_UNKNOWN = 0,
+ FMC2_IRQ_BCH,
+ FMC2_IRQ_SEQ
+};
+
+struct stm32_fmc2_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 twait;
+ u8 thold_mem;
+ u8 tset_mem;
+ u8 thold_att;
+ u8 tset_att;
+};
+
+struct stm32_fmc2_nand {
+ struct nand_chip chip;
+ struct stm32_fmc2_timings timings;
+ int ncs;
+ int cs_used[FMC2_MAX_CE];
+};
+
+static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct stm32_fmc2_nand, chip);
+}
+
+struct stm32_fmc2_nfc {
+ struct nand_controller base;
+ struct stm32_fmc2_nand nand;
+ struct device *dev;
+ void __iomem *io_base;
+ void __iomem *data_base[FMC2_MAX_CE];
+ void __iomem *cmd_base[FMC2_MAX_CE];
+ void __iomem *addr_base[FMC2_MAX_CE];
+ phys_addr_t io_phys_addr;
+ phys_addr_t data_phys_addr[FMC2_MAX_CE];
+ struct clk *clk;
+ u8 irq_state;
+
+ struct dma_chan *dma_tx_ch;
+ struct dma_chan *dma_rx_ch;
+ struct dma_chan *dma_ecc_ch;
+ struct sg_table dma_data_sg;
+ struct sg_table dma_ecc_sg;
+ u8 *ecc_buf;
+ int dma_ecc_len;
+
+ struct completion complete;
+ struct completion dma_data_complete;
+ struct completion dma_ecc_complete;
+
+ u8 cs_assigned;
+ int cs_sel;
+};
+
+static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
+{
+ return container_of(base, struct stm32_fmc2_nfc, base);
+}
+
+/* Timings configuration */
+static void stm32_fmc2_timings_init(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *timings = &nand->timings;
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 pmem, patt;
+
+ /* Set tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR_MASK;
+ pcr |= FMC2_PCR_TCLR(timings->tclr);
+ pcr &= ~FMC2_PCR_TAR_MASK;
+ pcr |= FMC2_PCR_TAR(timings->tar);
+
+ /* Set tset/twait/thold/thiz timings in common bank */
+ pmem = FMC2_PMEM_MEMSET(timings->tset_mem);
+ pmem |= FMC2_PMEM_MEMWAIT(timings->twait);
+ pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem);
+ pmem |= FMC2_PMEM_MEMHIZ(timings->thiz);
+
+ /* Set tset/twait/thold/thiz timings in attribute bank */
+ patt = FMC2_PATT_ATTSET(timings->tset_att);
+ patt |= FMC2_PATT_ATTWAIT(timings->twait);
+ patt |= FMC2_PATT_ATTHOLD(timings->thold_att);
+ patt |= FMC2_PATT_ATTHIZ(timings->thiz);
+
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM);
+ writel_relaxed(patt, fmc2->io_base + FMC2_PATT);
+}
+
+/* Controller configuration */
+static void stm32_fmc2_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ /* Configure ECC algorithm (default configuration is Hamming) */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ pcr |= FMC2_PCR_ECCALG;
+ pcr |= FMC2_PCR_BCHECC;
+ } else if (chip->ecc.strength == FMC2_ECC_BCH4) {
+ pcr |= FMC2_PCR_ECCALG;
+ }
+
+ /* Set buswidth */
+ pcr &= ~FMC2_PCR_PWID_MASK;
+ if (chip->options & NAND_BUSWIDTH_16)
+ pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+
+ /* Set ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS_MASK;
+ pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512);
+
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+}
+
+/* Select target */
+static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct dma_slave_config dma_cfg;
+ int ret;
+
+ if (nand->cs_used[chipnr] == fmc2->cs_sel)
+ return 0;
+
+ fmc2->cs_sel = nand->cs_used[chipnr];
+
+ /* FMC2 setup routine */
+ stm32_fmc2_setup(chip);
+
+ /* Apply timings */
+ stm32_fmc2_timings_init(chip);
+
+ if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) {
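+ /* The same slave config (data register address) is used for tx and rx */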
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+ dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.src_maxburst = 32;
+ dma_cfg.dst_maxburst = 32;
+
+ ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(fmc2->dev, "tx DMA engine slave config failed\n");
+ return ret;
+ }
+
+ ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg);
+ if (ret) {
+ dev_err(fmc2->dev, "rx DMA engine slave config failed\n");
+ return ret;
+ }
+ }
+
+ if (fmc2->dma_ecc_ch) {
+ /*
+ * Hamming: we read HECCR register
+ * BCH4/BCH8: we read BCHDSRSx registers
+ */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = fmc2->io_phys_addr;
+ dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR : FMC2_BCHDSR0;
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg);
+ if (ret) {
+ dev_err(fmc2->dev, "ECC DMA engine slave config failed\n");
+ return ret;
+ }
+
+ /* Calculate ECC length needed for one sector */
+ fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+ }
+
+ return 0;
+}
+
+/* Set bus width to 16-bit or 8-bit */
+static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set)
+{
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ pcr &= ~FMC2_PCR_PWID_MASK;
+ if (set)
+ pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+}
+
+/* Enable/disable ECC */
+static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable)
+{
+ u32 pcr = readl(fmc2->io_base + FMC2_PCR);
+
+ pcr &= ~FMC2_PCR_ECCEN;
+ if (enable)
+ pcr |= FMC2_PCR_ECCEN;
+ writel(pcr, fmc2->io_base + FMC2_PCR);
+}
+
+/* Enable irq sources when the sequencer is used */
+static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+
+ csqier |= FMC2_CSQIER_TCIE;
+
+ fmc2->irq_state = FMC2_IRQ_SEQ;
+
+ writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+}
+
+/* Disable irq sources when the sequencer is used */
+static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+
+ csqier &= ~FMC2_CSQIER_TCIE;
+
+ writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+
+ fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+/* Clear irq sources when the sequencer is used */
+static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR);
+}
+
+/* Enable irq sources when BCH is used */
+static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2,
+ int mode)
+{
+ u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+
+ if (mode == NAND_ECC_WRITE)
+ bchier |= FMC2_BCHIER_EPBRIE;
+ else
+ bchier |= FMC2_BCHIER_DERIE;
+
+ fmc2->irq_state = FMC2_IRQ_BCH;
+
+ writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+}
+
+/* Disable irq sources when BCH is used */
+static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+
+ bchier &= ~FMC2_BCHIER_DERIE;
+ bchier &= ~FMC2_BCHIER_EPBRIE;
+
+ writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+
+ fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+}
+
+/* Clear irq sources when BCH is used */
+static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2)
+{
+ writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR);
+}
+
+/*
+ * Enable ECC logic and reset syndrome/parity bits previously calculated
+ * Syndrome/parity bits are cleared by setting the ECCEN bit to 0
+ */
+static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ if (chip->ecc.strength != FMC2_ECC_HAM) {
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ if (mode == NAND_ECC_WRITE)
+ pcr |= FMC2_PCR_WEN;
+ else
+ pcr &= ~FMC2_PCR_WEN;
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+
+ reinit_completion(&fmc2->complete);
+ stm32_fmc2_clear_bch_irq(fmc2);
+ stm32_fmc2_enable_bch_irq(fmc2, mode);
+ }
+
+ stm32_fmc2_set_ecc(fmc2, true);
+}
+
+/*
+ * ECC Hamming calculation
+ * ECC is 3 bytes for 512 bytes of data (supports correction of at most
+ * 1 bit error)
+ */
+static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+{
+ ecc[0] = ecc_sta;
+ ecc[1] = ecc_sta >> 8;
+ ecc[2] = ecc_sta >> 16;
+}
+
+static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 sr, heccr;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
+ sr, sr & FMC2_SR_NWRF, 10, 1000);
+ if (ret) {
+ dev_err(fmc2->dev, "ham timeout\n");
+ return ret;
+ }
+
+ heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR);
+
+ stm32_fmc2_ham_set_ecc(heccr, ecc);
+
+ /* Disable ECC */
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ u8 bit_position = 0, b0, b1, b2;
+ u32 byte_addr = 0, b;
+ u32 i, shifting = 1;
+
+ /* Determine which bit and byte are faulty (if any) */
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ b2 = read_ecc[2] ^ calc_ecc[2];
+ b = b0 | (b1 << 8) | (b2 << 16);
+
+ /* No errors */
+ if (likely(!b))
+ return 0;
+
+ /* Calculate bit position */
+ for (i = 0; i < 3; i++) {
+ switch (b % 4) {
+ case 2:
+ bit_position += shifting;
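+ /* fall through */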
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Calculate byte position */
+ shifting = 1;
+ for (i = 0; i < 9; i++) {
+ switch (b % 4) {
+ case 2:
+ byte_addr += shifting;
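+ /* fall through */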
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Flip the bit */
+ dat[byte_addr] ^= (1 << bit_position);
+
+ return 1;
+}
+
+/*
+ * ECC BCH calculation and correction
+ * ECC is 7/13 bytes for 512 bytes of data (supports correction of at most
+ * 4/8 bit errors)
+ */
+static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 bchpbr;
+
+ /* Wait until the BCH code is ready */
+ if (!wait_for_completion_timeout(&fmc2->complete,
+ msecs_to_jiffies(1000))) {
+ dev_err(fmc2->dev, "bch timeout\n");
+ stm32_fmc2_disable_bch_irq(fmc2);
+ return -ETIMEDOUT;
+ }
+
+ /* Read parity bits */
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1);
+ ecc[0] = bchpbr;
+ ecc[1] = bchpbr >> 8;
+ ecc[2] = bchpbr >> 16;
+ ecc[3] = bchpbr >> 24;
+
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2);
+ ecc[4] = bchpbr;
+ ecc[5] = bchpbr >> 8;
+ ecc[6] = bchpbr >> 16;
+
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ ecc[7] = bchpbr >> 24;
+
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3);
+ ecc[8] = bchpbr;
+ ecc[9] = bchpbr >> 8;
+ ecc[10] = bchpbr >> 16;
+ ecc[11] = bchpbr >> 24;
+
+ bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4);
+ ecc[12] = bchpbr;
+ }
+
+ /* Disable ECC */
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ return 0;
+}
+
+/* BCH algorithm correction */
+static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+{
+ u32 bchdsr0 = ecc_sta[0];
+ u32 bchdsr1 = ecc_sta[1];
+ u32 bchdsr2 = ecc_sta[2];
+ u32 bchdsr3 = ecc_sta[3];
+ u32 bchdsr4 = ecc_sta[4];
+ u16 pos[8];
+ int i, den;
+ unsigned int nb_errs = 0;
+
+ /* No errors found */
+ if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF)))
+ return 0;
+
+ /* Too many errors detected */
+ if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
+ return -EBADMSG;
+
+ pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK;
+ pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT;
+ pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK;
+ pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT;
+ pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK;
+ pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT;
+ pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK;
+ pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT;
+
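+ /* DEN gives the number of errors detected by the BCH decoder */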
+ den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT;
+ for (i = 0; i < den; i++) {
+ if (pos[i] < eccsize * 8) {
+ change_bit(pos[i], (unsigned long *)dat);
+ nb_errs++;
+ }
+ }
+
+ return nb_errs;
+}
+
+static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u32 ecc_sta[5];
+
+ /* Wait until the decoding status registers are ready */
+ if (!wait_for_completion_timeout(&fmc2->complete,
+ msecs_to_jiffies(1000))) {
+ dev_err(fmc2->dev, "bch timeout\n");
+ stm32_fmc2_disable_bch_irq(fmc2);
+ return -ETIMEDOUT;
+ }
+
+ ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0);
+ ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1);
+ ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2);
+ ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3);
+ ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4);
+
+ /* Disable ECC */
+ stm32_fmc2_set_ecc(fmc2, false);
+
+ return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta);
+}
+
+static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps;
+ s++, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ /* Read the nand page sector (512 bytes) */
+ ret = nand_change_read_column_op(chip, s * eccsize, p,
+ eccsize, false);
+ if (ret)
+ return ret;
+
+ /* Read the corresponding ECC bytes */
+ ret = nand_change_read_column_op(chip, i, ecc_code,
+ eccbytes, false);
+ if (ret)
+ return ret;
+
+ /* Correct the data */
+ stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc);
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ ecc_code, eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Read oob */
+ if (oob_required) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
+
+/* Sequencer read/write configuration */
+static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 csqcfgr1, csqcfgr2, csqcfgr3;
+ u32 csqar1, csqar2;
+ u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+
+ if (write_data)
+ pcr |= FMC2_PCR_WEN;
+ else
+ pcr &= ~FMC2_PCR_WEN;
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+
+ /*
+ * - Set Program Page/Page Read command
+ * - Enable DMA request data
+ * - Set timings
+ */
+ csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+ if (write_data)
+ csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN);
+ else
+ csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) |
+ FMC2_CSQCFGR1_CMD2EN |
+ FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) |
+ FMC2_CSQCFGR1_CMD2T;
+
+ /*
+ * - Set Random Data Input/Random Data Read command
+ * - Enable the sequencer to access the Spare data area
+ * - Enable DMA request status decoding for read
+ * - Set timings
+ */
+ if (write_data)
+ csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN);
+ else
+ csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) |
+ FMC2_CSQCFGR2_RCMD2EN |
+ FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) |
+ FMC2_CSQCFGR2_RCMD1T |
+ FMC2_CSQCFGR2_RCMD2T;
+ if (!raw) {
+ csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+ csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN;
+ }
+
+ /*
+ * - Set the number of sectors to be written
+ * - Set timings
+ */
+ csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1);
+ if (write_data) {
+ csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
+ if (chip->options & NAND_ROW_ADDR_3)
+ csqcfgr3 |= FMC2_CSQCFGR3_AC5T;
+ else
+ csqcfgr3 |= FMC2_CSQCFGR3_AC4T;
+ }
+
+ /*
+ * Set the first four address cycles
+ * Byte 1 and byte 2 => column, we start at 0x0
+ * Byte 3 and byte 4 => page
+ */
+ csqar1 = FMC2_CSQCAR1_ADDC3(page);
+ csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
+
+ /*
+ * - Set chip enable number
+ * - Set ECC byte offset in the spare area
+ * - Calculate the number of address cycles to be issued
+ * - Set byte 5 of address cycle if needed
+ */
+ csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel);
+ if (chip->options & NAND_BUSWIDTH_16)
+ csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1);
+ else
+ csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset);
+ if (chip->options & NAND_ROW_ADDR_3) {
+ csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5);
+ csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
+ } else {
+ csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4);
+ }
+
+ writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1);
+ writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2);
+ writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3);
+ writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1);
+ writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2);
+}
+
+static void stm32_fmc2_dma_callback(void *arg)
+{
+ complete((struct completion *)arg);
+}
+
+/* Read/write data from/to a page */
+static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
+ int raw, bool write_data)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct dma_async_tx_descriptor *desc_data, *desc_ecc;
+ struct scatterlist *sg;
+ struct dma_chan *dma_ch = fmc2->dma_rx_ch;
+ enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
+ enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
+ u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR);
+ int eccsteps = chip->ecc.steps;
+ int eccsize = chip->ecc.size;
+ const u8 *p = buf;
+ int s, ret;
+
+ /* Configure DMA data */
+ if (write_data) {
+ dma_data_dir = DMA_TO_DEVICE;
+ dma_transfer_dir = DMA_MEM_TO_DEV;
+ dma_ch = fmc2->dma_tx_ch;
+ }
+
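+ /* Map the page buffer with one scatterlist entry per ECC sector */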
+ for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, eccsize);
+ p += eccsize;
+ }
+
+ ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (ret < 0)
+ return ret;
+
+ desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_unmap_data;
+ }
+
+ reinit_completion(&fmc2->dma_data_complete);
+ reinit_completion(&fmc2->complete);
+ desc_data->callback = stm32_fmc2_dma_callback;
+ desc_data->callback_param = &fmc2->dma_data_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_data));
+ if (ret)
+ goto err_unmap_data;
+
+ dma_async_issue_pending(dma_ch);
+
+ if (!write_data && !raw) {
+ /* Configure DMA ECC status */
+ p = fmc2->ecc_buf;
+ for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, fmc2->dma_ecc_len);
+ p += fmc2->dma_ecc_len;
+ }
+
+ ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+ if (ret < 0)
+ goto err_unmap_data;
+
+ desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch,
+ fmc2->dma_ecc_sg.sgl,
+ eccsteps, dma_transfer_dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc_ecc) {
+ ret = -ENOMEM;
+ goto err_unmap_ecc;
+ }
+
+ reinit_completion(&fmc2->dma_ecc_complete);
+ desc_ecc->callback = stm32_fmc2_dma_callback;
+ desc_ecc->callback_param = &fmc2->dma_ecc_complete;
+ ret = dma_submit_error(dmaengine_submit(desc_ecc));
+ if (ret)
+ goto err_unmap_ecc;
+
+ dma_async_issue_pending(fmc2->dma_ecc_ch);
+ }
+
+ stm32_fmc2_clear_seq_irq(fmc2);
+ stm32_fmc2_enable_seq_irq(fmc2);
+
+ /* Start the transfer */
+ csqcr |= FMC2_CSQCR_CSQSTART;
+ writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR);
+
+ /* Wait end of sequencer transfer */
+ if (!wait_for_completion_timeout(&fmc2->complete,
+ msecs_to_jiffies(1000))) {
+ dev_err(fmc2->dev, "seq timeout\n");
+ stm32_fmc2_disable_seq_irq(fmc2);
+ dmaengine_terminate_all(dma_ch);
+ if (!write_data && !raw)
+ dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ goto err_unmap_ecc;
+ }
+
+ /* Wait DMA data transfer completion */
+ if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
+ msecs_to_jiffies(100))) {
+ dev_err(fmc2->dev, "data DMA timeout\n");
+ dmaengine_terminate_all(dma_ch);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait DMA ECC transfer completion */
+ if (!write_data && !raw) {
+ if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
+ msecs_to_jiffies(100))) {
+ dev_err(fmc2->dev, "ECC DMA timeout\n");
+ dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+err_unmap_ecc:
+ if (!write_data && !raw)
+ dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ eccsteps, dma_data_dir);
+
+err_unmap_data:
+ dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir);
+
+ return ret;
+}
+
+static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_rw_page_init(chip, page, raw, true);
+
+ /* Write the page */
+ ret = stm32_fmc2_xfer(chip, buf, raw, true);
+ if (ret)
+ return ret;
+
+ /* Write oob */
+ if (oob_required) {
+ ret = nand_change_write_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
+}
+
+static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
+}
+
+/* Get a status indicating which sectors have errors */
+static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR);
+
+ return csqemsr & FMC2_CSQEMSR_SEM;
+}
+
+static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ int i, s, eccsize = chip->ecc.size;
+ u32 *ecc_sta = (u32 *)fmc2->ecc_buf;
+ u16 sta_map = stm32_fmc2_get_mapping_status(fmc2);
+ unsigned int max_bitflips = 0;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
+ int stat = 0;
+
+ if (eccstrength == FMC2_ECC_HAM) {
+ /* Ecc_sta = FMC2_HECCR */
+ if (sta_map & BIT(s)) {
+ stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]);
+ stat = stm32_fmc2_ham_correct(chip, dat,
+ &read_ecc[i],
+ &calc_ecc[i]);
+ }
+ ecc_sta++;
+ } else {
+ /*
+ * Ecc_sta[0] = FMC2_BCHDSR0
+ * Ecc_sta[1] = FMC2_BCHDSR1
+ * Ecc_sta[2] = FMC2_BCHDSR2
+ * Ecc_sta[3] = FMC2_BCHDSR3
+ * Ecc_sta[4] = FMC2_BCHDSR4
+ */
+ if (sta_map & BIT(s))
+ stat = stm32_fmc2_bch_decode(eccsize, dat,
+ ecc_sta);
+ ecc_sta += 5;
+ }
+
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(dat, eccsize,
+ &read_ecc[i],
+ eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ u16 sta_map;
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_rw_page_init(chip, page, 0, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_xfer(chip, buf, 0, false);
+ if (ret)
+ return ret;
+
+ sta_map = stm32_fmc2_get_mapping_status(fmc2);
+
+ /* Check if any error occurred */
+ if (likely(!sta_map)) {
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+
+ return 0;
+ }
+
+ /* Read oob */
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* Correct data */
+ return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
+}
+
+static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /* Select the target */
+ ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ if (ret)
+ return ret;
+
+ /* Configure the sequencer */
+ stm32_fmc2_rw_page_init(chip, page, 1, false);
+
+ /* Read the page */
+ ret = stm32_fmc2_xfer(chip, buf, 1, false);
+ if (ret)
+ return ret;
+
+ /* Read oob */
+ if (oob_required)
+ return nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+
+ return 0;
+}
+
+static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id)
+{
+ struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id;
+
+ if (fmc2->irq_state == FMC2_IRQ_SEQ)
+ /* Sequencer is used */
+ stm32_fmc2_disable_seq_irq(fmc2);
+ else if (fmc2->irq_state == FMC2_IRQ_BCH)
+ /* BCH is used */
+ stm32_fmc2_disable_bch_irq(fmc2);
+
+ complete(&fmc2->complete);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, false);
+
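+ /* Handle leading unaligned bytes so the bulk can use 32-bit reads */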
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ *(u32 *)buf = readl_relaxed(io_addr_r);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Read remaining bytes */
+ if (len >= sizeof(u16)) {
+ *(u16 *)buf = readw_relaxed(io_addr_r);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ *(u8 *)buf = readb_relaxed(io_addr_r);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+}
+
+static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel];
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 8-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, false);
+
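+ /* Handle leading unaligned bytes so the bulk can use 32-bit writes */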
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+ buf += sizeof(u8);
+ len -= sizeof(u8);
+ }
+
+ if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+ }
+
+ /* Buf is aligned */
+ while (len >= sizeof(u32)) {
+ writel_relaxed(*(u32 *)buf, io_addr_w);
+ buf += sizeof(u32);
+ len -= sizeof(u32);
+ }
+
+ /* Write remaining bytes */
+ if (len >= sizeof(u16)) {
+ writew_relaxed(*(u16 *)buf, io_addr_w);
+ buf += sizeof(u16);
+ len -= sizeof(u16);
+ }
+
+ if (len)
+ writeb_relaxed(*(u8 *)buf, io_addr_w);
+
+ if (force_8bit && chip->options & NAND_BUSWIDTH_16)
+ /* Reconfigure bus width to 16-bit */
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+}
+
+static int stm32_fmc2_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, i;
+ int ret;
+
+ ret = stm32_fmc2_select_chip(chip, op->cs);
+ if (ret)
+ return ret;
+
+ if (check_only)
+ return ret;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb_relaxed(instr->ctx.cmd.opcode,
+ fmc2->cmd_base[fmc2->cs_sel]);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb_relaxed(instr->ctx.addr.addrs[i],
+ fmc2->addr_base[fmc2->cs_sel]);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ stm32_fmc2_read_data(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ stm32_fmc2_write_data(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Controller initialization */
+static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
+{
+ u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1);
+
+ /* Set CS used to undefined */
+ fmc2->cs_sel = -1;
+
+ /* Enable wait feature and nand flash memory bank */
+ pcr |= FMC2_PCR_PWAITEN;
+ pcr |= FMC2_PCR_PBKEN;
+
+ /* Set bus width to 8-bit mode for identification */
+ pcr &= ~FMC2_PCR_PWID_MASK;
+
+ /* ECC logic is disabled */
+ pcr &= ~FMC2_PCR_ECCEN;
+
+ /* Default mode */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ pcr &= ~FMC2_PCR_WEN;
+
+ /* Set default ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS_MASK;
+ pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048);
+
+ /* Set default tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR_MASK;
+ pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT);
+ pcr &= ~FMC2_PCR_TAR_MASK;
+ pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT);
+
+ /* Enable FMC2 controller */
+ bcr1 |= FMC2_BCR1_FMC2EN;
+
+ writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1);
+ writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM);
+ writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT);
+}
+
+/* Controller timings */
+static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdrt)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(fmc2->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+ int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att;
+
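+ /* hclkp is the HCLK period in ps, matching the nand_sdr_timings unit */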
+ tar = hclkp;
+ if (tar < sdrt->tAR_min)
+ tar = sdrt->tAR_min;
+ tims->tar = DIV_ROUND_UP(tar, hclkp) - 1;
+ if (tims->tar > FMC2_PCR_TIMING_MASK)
+ tims->tar = FMC2_PCR_TIMING_MASK;
+
+ tclr = hclkp;
+ if (tclr < sdrt->tCLR_min)
+ tclr = sdrt->tCLR_min;
+ tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1;
+ if (tims->tclr > FMC2_PCR_TIMING_MASK)
+ tims->tclr = FMC2_PCR_TIMING_MASK;
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+
+ /*
+ * tWAIT > tRP
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+ twait = hclkp;
+ if (twait < sdrt->tRP_min)
+ twait = sdrt->tRP_min;
+ if (twait < sdrt->tWP_min)
+ twait = sdrt->tWP_min;
+ if (twait < sdrt->tREA_max + FMC2_TIO)
+ twait = sdrt->tREA_max + FMC2_TIO;
+ tims->twait = DIV_ROUND_UP(twait, hclkp);
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->twait = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+ * tSETUP_MEM > tALS - tWAIT
+ * tSETUP_MEM > tDS - (tWAIT - tHIZ)
+ */
+ tset_mem = hclkp;
+ if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait))
+ tset_mem = sdrt->tCS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait))
+ tset_mem = sdrt->tALS_min - twait;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+ tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp);
+ if (tims->tset_mem == 0)
+ tims->tset_mem = 1;
+ else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+ thold_mem = hclkp;
+ if (thold_mem < sdrt->tCH_min)
+ thold_mem = sdrt->tCH_min;
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+ if ((sdrt->tRC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tRC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tRC_min - (tset_mem + twait);
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+ tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp);
+ if (tims->thold_mem == 0)
+ tims->thold_mem = 1;
+ else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+ * tSETUP_ATT > tCLS - tWAIT
+ * tSETUP_ATT > tALS - tWAIT
+ * tSETUP_ATT > tRHW - tHOLD_MEM
+ * tSETUP_ATT > tDS - (tWAIT - tHIZ)
+ */
+ tset_att = hclkp;
+ if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait))
+ tset_att = sdrt->tCS_min - twait;
+ if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait))
+ tset_att = sdrt->tCLS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait))
+ tset_att = sdrt->tALS_min - twait;
+ if (sdrt->tRHW_min > thold_mem &&
+ (tset_att < sdrt->tRHW_min - thold_mem))
+ tset_att = sdrt->tRHW_min - thold_mem;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+ tims->tset_att = DIV_ROUND_UP(tset_att, hclkp);
+ if (tims->tset_att == 0)
+ tims->tset_att = 1;
+ else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK;
+
+ /*
+ * tHOLD_ATT > tALH
+ * tHOLD_ATT > tCH
+ * tHOLD_ATT > tCLH
+ * tHOLD_ATT > tCOH
+ * tHOLD_ATT > tDH
+ * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM
+ * tHOLD_ATT > tADL - tSETUP_MEM
+ * tHOLD_ATT > tWH - tSETUP_MEM
+ * tHOLD_ATT > tWHR - tSETUP_MEM
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+ thold_att = hclkp;
+ if (thold_att < sdrt->tALH_min)
+ thold_att = sdrt->tALH_min;
+ if (thold_att < sdrt->tCH_min)
+ thold_att = sdrt->tCH_min;
+ if (thold_att < sdrt->tCLH_min)
+ thold_att = sdrt->tCLH_min;
+ if (thold_att < sdrt->tCOH_min)
+ thold_att = sdrt->tCOH_min;
+ if (thold_att < sdrt->tDH_min)
+ thold_att = sdrt->tDH_min;
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+ if (sdrt->tADL_min > tset_mem &&
+ (thold_att < sdrt->tADL_min - tset_mem))
+ thold_att = sdrt->tADL_min - tset_mem;
+ if (sdrt->tWH_min > tset_mem &&
+ (thold_att < sdrt->tWH_min - tset_mem))
+ thold_att = sdrt->tWH_min - tset_mem;
+ if (sdrt->tWHR_min > tset_mem &&
+ (thold_att < sdrt->tWHR_min - tset_mem))
+ thold_att = sdrt->tWHR_min - tset_mem;
+ if ((sdrt->tRC_min > tset_att + twait) &&
+ (thold_att < sdrt->tRC_min - (tset_att + twait)))
+ thold_att = sdrt->tRC_min - (tset_att + twait);
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+ tims->thold_att = DIV_ROUND_UP(thold_att, hclkp);
+ if (tims->thold_att == 0)
+ tims->thold_att = 1;
+ else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK)
+ tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK;
+}
+
+static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ const struct nand_sdr_timings *sdrt;
+
+ sdrt = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ stm32_fmc2_calc_timings(chip, sdrt);
+
+ /* Apply timings */
+ stm32_fmc2_timings_init(chip);
+
+ return 0;
+}
+
+/* DMA configuration */
+static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
+{
+ int ret;
+
+ fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx");
+ fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx");
+ fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc");
+
+ if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) {
+ dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n");
+ return 0;
+ }
+
+ ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ /* Allocate a buffer to store ECC status registers */
+ fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN,
+ GFP_KERNEL);
+ if (!fmc2->ecc_buf)
+ return -ENOMEM;
+
+ ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ init_completion(&fmc2->dma_data_complete);
+ init_completion(&fmc2->dma_ecc_complete);
+
+ return 0;
+}
+
+/* NAND callbacks setup */
+static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+
+ /*
+ * Specific callbacks to read/write a page depending on
+ * the mode (polling/sequencer) and the algo used (Hamming, BCH).
+ */
+ if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) {
+ /* DMA => use sequencer mode callbacks */
+ chip->ecc.correct = stm32_fmc2_sequencer_correct;
+ chip->ecc.write_page = stm32_fmc2_sequencer_write_page;
+ chip->ecc.read_page = stm32_fmc2_sequencer_read_page;
+ chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw;
+ chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw;
+ } else {
+ /* No DMA => use polling mode callbacks */
+ chip->ecc.hwctl = stm32_fmc2_hwctl;
+ if (chip->ecc.strength == FMC2_ECC_HAM) {
+ /* Hamming is used */
+ chip->ecc.calculate = stm32_fmc2_ham_calculate;
+ chip->ecc.correct = stm32_fmc2_ham_correct;
+ chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
+ } else {
+ /* BCH is used */
+ chip->ecc.calculate = stm32_fmc2_bch_calculate;
+ chip->ecc.correct = stm32_fmc2_bch_correct;
+ chip->ecc.read_page = stm32_fmc2_read_page;
+ }
+ }
+
+ /* Specific configurations depending on the algo used */
+ if (chip->ecc.strength == FMC2_ECC_HAM)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3;
+ else if (chip->ecc.strength == FMC2_ECC_BCH8)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13;
+ else
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
+}
+
+/* FMC2 layout */
+static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN;
+ oobregion->offset = ecc->total + FMC2_BBM_LEN;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = {
+ .ecc = stm32_fmc2_nand_ooblayout_ecc,
+ .free = stm32_fmc2_nand_ooblayout_free,
+};
+
+/* FMC2 caps */
+static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
+{
+ /* Hamming */
+ if (strength == FMC2_ECC_HAM)
+ return 4;
+
+ /* BCH8 */
+ if (strength == FMC2_ECC_BCH8)
+ return 14;
+
+ /* BCH4 */
+ return 8;
+}
+
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes,
+ FMC2_ECC_STEP_SIZE,
+ FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
+
+/* FMC2 controller ops */
+static int stm32_fmc2_attach_chip(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Only NAND_ECC_HW mode is actually supported
+ * Hamming => ecc.strength = 1
+ * BCH4 => ecc.strength = 4
+ * BCH8 => ecc.strength = 8
+ * ECC sector size = 512
+ */
+ if (chip->ecc.mode != NAND_ECC_HW) {
+ dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n");
+ return -EINVAL;
+ }
+
+ ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps,
+ mtd->oobsize - FMC2_BBM_LEN);
+ if (ret) {
+ dev_err(fmc2->dev, "no valid ECC settings set\n");
+ return ret;
+ }
+
+ if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
+ dev_err(fmc2->dev, "nand page size is not supported\n");
+ return -EINVAL;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ /* NAND callbacks setup */
+ stm32_fmc2_nand_callbacks_setup(chip);
+
+ /* Define ECC layout */
+ mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops);
+
+ /* Configure bus width to 16-bit */
+ if (chip->options & NAND_BUSWIDTH_16)
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+
+ return 0;
+}
+
+static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = {
+ .attach_chip = stm32_fmc2_attach_chip,
+ .exec_op = stm32_fmc2_exec_op,
+ .setup_data_interface = stm32_fmc2_setup_interface,
+};
+
+/* FMC2 probe */
+static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
+ struct device_node *dn)
+{
+ struct stm32_fmc2_nand *nand = &fmc2->nand;
+ u32 cs;
+ int ret, i;
+
+ if (!of_get_property(dn, "reg", &nand->ncs))
+ return -EINVAL;
+
+ nand->ncs /= sizeof(u32);
+ if (!nand->ncs) {
+ dev_err(fmc2->dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nand->ncs; i++) {
+ ret = of_property_read_u32_index(dn, "reg", i, &cs);
+ if (ret) {
+ dev_err(fmc2->dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs >= FMC2_MAX_CE) {
+ dev_err(fmc2->dev, "invalid reg value: %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (fmc2->cs_assigned & BIT(cs)) {
+ dev_err(fmc2->dev, "cs already assigned: %d\n", cs);
+ return -EINVAL;
+ }
+
+ fmc2->cs_assigned |= BIT(cs);
+ nand->cs_used[i] = cs;
+ }
+
+ nand_set_flash_node(&nand->chip, dn);
+
+ return 0;
+}
+
+static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
+{
+ struct device_node *dn = fmc2->dev->of_node;
+ struct device_node *child;
+ int nchips = of_get_child_count(dn);
+ int ret = 0;
+
+ if (!nchips) {
+ dev_err(fmc2->dev, "NAND chip not defined\n");
+ return -EINVAL;
+ }
+
+ if (nchips > 1) {
+ dev_err(fmc2->dev, "too many NAND chips defined\n");
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(dn, child) {
+ ret = stm32_fmc2_parse_child(fmc2, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int stm32_fmc2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct stm32_fmc2_nfc *fmc2;
+ struct stm32_fmc2_nand *nand;
+ struct resource *res;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int chip_cs, mem_region, ret, irq;
+
+ fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL);
+ if (!fmc2)
+ return -ENOMEM;
+
+ fmc2->dev = dev;
+ nand_controller_init(&fmc2->base);
+ fmc2->base.ops = &stm32_fmc2_nand_controller_ops;
+
+ ret = stm32_fmc2_parse_dt(fmc2);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fmc2->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->io_base))
+ return PTR_ERR(fmc2->io_base);
+
+ fmc2->io_phys_addr = res->start;
+
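+ /* Each assigned CS uses three memory regions: data, cmd and addr */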
+ for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
+ chip_cs++, mem_region += 3) {
+ if (!(fmc2->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
+ fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->data_base[chip_cs]))
+ return PTR_ERR(fmc2->data_base[chip_cs]);
+
+ fmc2->data_phys_addr[chip_cs] = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mem_region + 1);
+ fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->cmd_base[chip_cs]))
+ return PTR_ERR(fmc2->cmd_base[chip_cs]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mem_region + 2);
+ fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fmc2->addr_base[chip_cs]))
+ return PTR_ERR(fmc2->addr_base[chip_cs]);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
+ dev_name(dev), fmc2);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&fmc2->complete);
+
+ fmc2->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(fmc2->clk))
+ return PTR_ERR(fmc2->clk);
+
+ ret = clk_prepare_enable(fmc2->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
+
+ /* DMA setup */
+ ret = stm32_fmc2_dma_setup(fmc2);
+ if (ret)
+ return ret;
+
+ /* FMC2 init routine */
+ stm32_fmc2_init(fmc2);
+
+ nand = &fmc2->nand;
+ chip = &nand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ chip->controller = &fmc2->base;
+ chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USE_BOUNCE_BUFFER;
+
+ /* Default ECC settings */
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
+ /* Scan to find existence of the device */
+ ret = nand_scan(chip, nand->ncs);
+ if (ret)
+ goto err_scan;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_device_register;
+
+ platform_set_drvdata(pdev, fmc2);
+
+ return 0;
+
+err_device_register:
+ nand_cleanup(chip);
+
+err_scan:
+ if (fmc2->dma_ecc_ch)
+ dma_release_channel(fmc2->dma_ecc_ch);
+ if (fmc2->dma_tx_ch)
+ dma_release_channel(fmc2->dma_tx_ch);
+ if (fmc2->dma_rx_ch)
+ dma_release_channel(fmc2->dma_rx_ch);
+
+ sg_free_table(&fmc2->dma_data_sg);
+ sg_free_table(&fmc2->dma_ecc_sg);
+
+ clk_disable_unprepare(fmc2->clk);
+
+ return ret;
+}
+
+static int stm32_fmc2_remove(struct platform_device *pdev)
+{
+ struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev);
+ struct stm32_fmc2_nand *nand = &fmc2->nand;
+
+ nand_release(&nand->chip);
+
+ if (fmc2->dma_ecc_ch)
+ dma_release_channel(fmc2->dma_ecc_ch);
+ if (fmc2->dma_tx_ch)
+ dma_release_channel(fmc2->dma_tx_ch);
+ if (fmc2->dma_rx_ch)
+ dma_release_channel(fmc2->dma_rx_ch);
+
+ sg_free_table(&fmc2->dma_data_sg);
+ sg_free_table(&fmc2->dma_ecc_sg);
+
+ clk_disable_unprepare(fmc2->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_suspend(struct device *dev)
+{
+ struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(fmc2->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_resume(struct device *dev)
+{
+ struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &fmc2->nand;
+ int chip_cs, ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(fmc2->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ stm32_fmc2_init(fmc2);
+
+ for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
+ if (!(fmc2->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ nand_reset(&nand->chip, chip_cs);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend,
+ stm32_fmc2_resume);
+
+static const struct of_device_id stm32_fmc2_match[] = {
+ {.compatible = "st,stm32mp15-fmc2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_fmc2_match);
+
+static struct platform_driver stm32_fmc2_driver = {
+ .probe = stm32_fmc2_probe,
+ .remove = stm32_fmc2_remove,
+ .driver = {
+ .name = "stm32_fmc2_nand",
+ .of_match_table = stm32_fmc2_match,
+ .pm = &stm32_fmc2_pm_ops,
+ },
+};
+module_platform_driver(stm32_fmc2_driver);
+
+MODULE_ALIAS("platform:stm32_fmc2_nand");
+MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index e828ee50a201..4282bc477761 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
*
@@ -10,16 +11,6 @@
*
* Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
* Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/dma-mapping.h>
@@ -163,38 +154,36 @@
#define NFC_MAX_CS 7
-/*
- * Chip Select structure: stores information related to NAND Chip Select
+/**
+ * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
*
- * @cs: the NAND CS id used to communicate with a NAND Chip
- * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the
- * NFC
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
*/
struct sunxi_nand_chip_sel {
u8 cs;
s8 rb;
};
-/*
- * sunxi HW ECC infos: stores information related to HW ECC support
+/**
+ * struct sunxi_nand_hw_ecc - stores information related to HW ECC support
*
- * @mode: the sunxi ECC mode field deduced from ECC requirements
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
*/
struct sunxi_nand_hw_ecc {
int mode;
};
-/*
- * NAND chip structure: stores NAND chip device related information
+/**
+ * struct sunxi_nand_chip - stores NAND chip device related information
*
- * @node: used to store NAND chips into a list
- * @nand: base NAND chip structure
- * @mtd: base MTD structure
- * @clk_rate: clk_rate required for this NAND chip
- * @timing_cfg TIMING_CFG register value for this NAND chip
- * @selected: current active CS
- * @nsels: number of CS lines required by the NAND chip
- * @sels: array of CS lines descriptions
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value for this NAND chip
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
*/
struct sunxi_nand_chip {
struct list_head node;
@@ -202,11 +191,6 @@ struct sunxi_nand_chip {
unsigned long clk_rate;
u32 timing_cfg;
u32 timing_ctl;
- int selected;
- int addr_cycles;
- u32 addr[2];
- int cmd_cycles;
- u8 cmd[2];
int nsels;
struct sunxi_nand_chip_sel sels[0];
};
@@ -216,20 +200,21 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
return container_of(nand, struct sunxi_nand_chip, nand);
}
-/*
- * NAND Controller structure: stores sunxi NAND controller information
+/**
+ * struct sunxi_nfc - stores sunxi NAND controller information
*
- * @controller: base controller structure
- * @dev: parent device (used to print error messages)
- * @regs: NAND controller registers
- * @ahb_clk: NAND Controller AHB clock
- * @mod_clk: NAND Controller mod clock
- * @assigned_cs: bitmask describing already assigned CS lines
- * @clk_rate: NAND controller current clock rate
- * @chips: a list containing all the NAND chips attached to
- * this NAND controller
- * @complete: a completion object used to wait for NAND
- * controller events
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND controller AHB clock
+ * @mod_clk: NAND controller mod clock
+ * @reset: NAND controller reset line
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to this NAND
+ * controller
+ * @complete: a completion object used to wait for NAND controller events
+ * @dmac: the DMA channel attached to the NAND controller
*/
struct sunxi_nfc {
struct nand_controller controller;
@@ -339,13 +324,11 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
return ret;
}
-static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
int chunksize, int nchunks,
enum dma_data_direction ddir,
struct scatterlist *sg)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct dma_async_tx_descriptor *dmad;
enum dma_transfer_direction tdir;
dma_cookie_t dmat;
@@ -388,38 +371,16 @@ err_unmap_buf:
return ret;
}
-static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
enum dma_data_direction ddir,
struct scatterlist *sg)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
dma_unmap_sg(nfc->dev, sg, 1, ddir);
writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
nfc->regs + NFC_REG_CTL);
}
-static int sunxi_nfc_dev_ready(struct nand_chip *nand)
-{
- struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
- u32 mask;
-
- if (sunxi_nand->selected < 0)
- return 0;
-
- if (sunxi_nand->sels[sunxi_nand->selected].rb < 0) {
- dev_err(nfc->dev, "cannot check R/B NAND status!\n");
- return 0;
- }
-
- mask = NFC_RB_STATE(sunxi_nand->sels[sunxi_nand->selected].rb);
-
- return !!(readl(nfc->regs + NFC_REG_ST) & mask);
-}
-
-static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip)
+static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
@@ -427,40 +388,27 @@ static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip)
struct sunxi_nand_chip_sel *sel;
u32 ctl;
- if (chip > 0 && chip >= sunxi_nand->nsels)
- return;
-
- if (chip == sunxi_nand->selected)
+ if (cs > 0 && cs >= sunxi_nand->nsels)
return;
ctl = readl(nfc->regs + NFC_REG_CTL) &
~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
- if (chip >= 0) {
- sel = &sunxi_nand->sels[chip];
+ sel = &sunxi_nand->sels[cs];
+ ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
+ if (sel->rb >= 0)
+ ctl |= NFC_RB_SEL(sel->rb);
- ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
- NFC_PAGE_SHIFT(nand->page_shift);
- if (sel->rb < 0) {
- nand->legacy.dev_ready = NULL;
- } else {
- nand->legacy.dev_ready = sunxi_nfc_dev_ready;
- ctl |= NFC_RB_SEL(sel->rb);
- }
-
- writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
- if (nfc->clk_rate != sunxi_nand->clk_rate) {
- clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
- nfc->clk_rate = sunxi_nand->clk_rate;
- }
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
}
writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
writel(ctl, nfc->regs + NFC_REG_CTL);
-
- sunxi_nand->selected = chip;
}
static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
@@ -537,71 +485,6 @@ static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
}
}
-static uint8_t sunxi_nfc_read_byte(struct nand_chip *nand)
-{
- uint8_t ret = 0;
-
- sunxi_nfc_read_buf(nand, &ret, 1);
-
- return ret;
-}
-
-static void sunxi_nfc_cmd_ctrl(struct nand_chip *nand, int dat,
- unsigned int ctrl)
-{
- struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
- int ret;
-
- if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
- !(ctrl & (NAND_CLE | NAND_ALE))) {
- u32 cmd = 0;
-
- if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
- return;
-
- if (sunxi_nand->cmd_cycles--)
- cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
-
- if (sunxi_nand->cmd_cycles--) {
- cmd |= NFC_SEND_CMD2;
- writel(sunxi_nand->cmd[1],
- nfc->regs + NFC_REG_RCMD_SET);
- }
-
- sunxi_nand->cmd_cycles = 0;
-
- if (sunxi_nand->addr_cycles) {
- cmd |= NFC_SEND_ADR |
- NFC_ADR_NUM(sunxi_nand->addr_cycles);
- writel(sunxi_nand->addr[0],
- nfc->regs + NFC_REG_ADDR_LOW);
- }
-
- if (sunxi_nand->addr_cycles > 4)
- writel(sunxi_nand->addr[1],
- nfc->regs + NFC_REG_ADDR_HIGH);
-
- ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
- if (ret)
- return;
-
- writel(cmd, nfc->regs + NFC_REG_CMD);
- sunxi_nand->addr[0] = 0;
- sunxi_nand->addr[1] = 0;
- sunxi_nand->addr_cycles = 0;
- sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
- }
-
- if (ctrl & NAND_CLE) {
- sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
- } else if (ctrl & NAND_ALE) {
- sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
- dat << ((sunxi_nand->addr_cycles % 4) * 8);
- sunxi_nand->addr_cycles++;
- }
-}
-
/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
@@ -684,8 +567,10 @@ static u16 sunxi_nfc_randomizer_step(u16 state, int count)
return state;
}
-static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
+ bool ecc)
{
+ struct mtd_info *mtd = nand_to_mtd(nand);
const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
int mod = mtd_div_by_ws(mtd->erasesize, mtd);
@@ -702,10 +587,9 @@ static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
return seeds[page % mod];
}
-static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
- int page, bool ecc)
+static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
+ bool ecc)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
u16 state;
@@ -714,14 +598,13 @@ static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
return;
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
- state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+ state = sunxi_nfc_randomizer_state(nand, page, ecc);
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
if (!(nand->options & NAND_NEED_SCRAMBLING))
@@ -731,9 +614,8 @@ static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
if (!(nand->options & NAND_NEED_SCRAMBLING))
@@ -743,36 +625,35 @@ static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
{
- u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+ u16 state = sunxi_nfc_randomizer_state(nand, page, true);
bbm[0] ^= state;
bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}
-static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
const uint8_t *buf, int len,
bool ecc, int page)
{
- sunxi_nfc_randomizer_config(mtd, page, ecc);
- sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_write_buf(mtd_to_nand(mtd), buf, len);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_write_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
}
-static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
int len, bool ecc, int page)
{
- sunxi_nfc_randomizer_config(mtd, page, ecc);
- sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_read_buf(mtd_to_nand(mtd), buf, len);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_config(nand, page, ecc);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_read_buf(nand, buf, len);
+ sunxi_nfc_randomizer_disable(nand);
}
-static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
+static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
u32 ecc_ctl;
@@ -789,9 +670,8 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}
-static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
+static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
@@ -811,10 +691,9 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}
-static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
int step, bool bbm, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
@@ -822,21 +701,20 @@ static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
/* De-randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
- sunxi_nfc_randomize_bbm(mtd, page, oob);
+ sunxi_nfc_randomize_bbm(nand, page, oob);
}
-static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
const u8 *oob, int step,
bool bbm, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u8 user_data[4];
/* Randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
memcpy(user_data, oob, sizeof(user_data));
- sunxi_nfc_randomize_bbm(mtd, page, user_data);
+ sunxi_nfc_randomize_bbm(nand, page, user_data);
oob = user_data;
}
@@ -844,9 +722,11 @@ static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
nfc->regs + NFC_REG_USER_DATA(step));
}
-static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
unsigned int *max_bitflips, int ret)
{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
if (ret < 0) {
mtd->ecc_stats.failed++;
} else {
@@ -855,10 +735,9 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
}
}
-static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
+static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
int step, u32 status, bool *erased)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
u32 tmp;
@@ -892,14 +771,13 @@ static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
return NFC_ECC_ERR_CNT(step, tmp);
}
-static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
+static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
u8 *data, int data_off,
u8 *oob, int oob_off,
int *cur_off,
unsigned int *max_bitflips,
bool bbm, bool oob_required, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int raw_mode = 0;
@@ -909,7 +787,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (*cur_off != data_off)
nand_change_read_column_op(nand, data_off, NULL, 0, false);
- sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
+ sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand_change_read_column_op(nand, oob_off, NULL, 0, false);
@@ -918,18 +796,18 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (ret)
return ret;
- sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
if (ret)
return ret;
*cur_off = oob_off + ecc->bytes + 4;
- ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+ ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
readl(nfc->regs + NFC_REG_ECC_ST),
&erased);
if (erased)
@@ -961,24 +839,24 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (oob_required) {
nand_change_read_column_op(nand, oob_off, NULL, 0,
false);
- sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
+ sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
true, page);
- sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
bbm, page);
}
}
- sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
+ sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);
return raw_mode;
}
-static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
u8 *oob, int *cur_off,
bool randomize, int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int offset = ((ecc->bytes + 4) * ecc->steps);
int len = mtd->oobsize - offset;
@@ -993,20 +871,20 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
if (!randomize)
sunxi_nfc_read_buf(nand, oob + offset, len);
else
- sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+ sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
false, page);
if (cur_off)
*cur_off = mtd->oobsize + mtd->writesize;
}
-static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
int oob_required, int page,
int nchunks)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
bool randomized = nand->options & NAND_NEED_SCRAMBLING;
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
unsigned int max_bitflips = 0;
int ret, i, raw_mode = 0;
@@ -1017,14 +895,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (ret)
return ret;
- ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
DMA_FROM_DEVICE, &sg);
if (ret)
return ret;
- sunxi_nfc_hw_ecc_enable(mtd);
- sunxi_nfc_randomizer_config(mtd, page, false);
- sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
@@ -1038,10 +916,10 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (ret)
dmaengine_terminate_all(nfc->dmac);
- sunxi_nfc_randomizer_disable(mtd);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
- sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);
if (ret)
return ret;
@@ -1055,7 +933,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
u8 *oob = nand->oob_poi + oob_off;
bool erased;
- ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+ ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
oob_required ? oob : NULL,
i, status, &erased);
@@ -1069,14 +947,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
mtd->writesize + oob_off,
oob, ecc->bytes + 4, false);
- sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
!i, page);
}
if (erased)
raw_mode = 1;
- sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
if (status & NFC_ECC_ERR_MSK) {
@@ -1111,25 +989,24 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (ret >= 0)
raw_mode = 1;
- sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
}
if (oob_required)
- sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
NULL, !raw_mode,
page);
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
+static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
const u8 *data, int data_off,
const u8 *oob, int oob_off,
int *cur_off, bool bbm,
int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret;
@@ -1137,7 +1014,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
if (data_off != *cur_off)
nand_change_write_column_op(nand, data_off, NULL, 0, false);
- sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
+ sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand_change_write_column_op(nand, oob_off, NULL, 0, false);
@@ -1146,15 +1023,15 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
if (ret)
return ret;
- sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
+ sunxi_nfc_randomizer_enable(nand);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
NFC_ACCESS_DIR | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
- sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
if (ret)
return ret;
@@ -1163,11 +1040,11 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
return 0;
}
-static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
u8 *oob, int *cur_off,
int page)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int offset = ((ecc->bytes + 4) * ecc->steps);
int len = mtd->oobsize - offset;
@@ -1179,32 +1056,34 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
nand_change_write_column_op(nand, offset + mtd->writesize,
NULL, 0, false);
- sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
+ sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);
if (cur_off)
*cur_off = mtd->oobsize + mtd->writesize;
}
-static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
+static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
unsigned int max_bitflips = 0;
int ret, i, cur_off = 0;
bool raw_mode = false;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = buf + data_off;
- u8 *oob = chip->oob_poi + oob_off;
+ u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips,
!i, oob_required, page);
@@ -1215,52 +1094,55 @@ static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
}
if (oob_required)
- sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+ sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
!raw_mode, page);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *chip, u8 *buf,
+static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
- chip->ecc.steps);
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
+ nand->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- return sunxi_nfc_hw_ecc_read_page(chip, buf, oob_required, page);
+ return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
}
-static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
u32 data_offs, u32 readlen,
u8 *bufpoi, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = bufpoi + data_off;
- u8 *oob = chip->oob_poi + oob_off;
+ u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
+ ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips, !i,
@@ -1269,113 +1151,118 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip,
return ret;
}
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
u32 data_offs, u32 readlen,
u8 *buf, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+ int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
int ret;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_read_page_op(nand, page, 0, NULL, 0);
- ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- return sunxi_nfc_hw_ecc_read_subpage(chip, data_offs, readlen,
+ return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
buf, page);
}
-static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
const uint8_t *buf, int oob_required,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
const u8 *data = buf + data_off;
- const u8 *oob = chip->oob_poi + oob_off;
+ const u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, !i, page);
if (ret)
return ret;
}
- if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
- sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
&cur_off, page);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
}
-static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
u32 data_offs, u32 data_len,
const u8 *buf, int oob_required,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
- sunxi_nfc_hw_ecc_enable(mtd);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
+ sunxi_nfc_hw_ecc_enable(nand);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
const u8 *data = buf + data_off;
- const u8 *oob = chip->oob_poi + oob_off;
+ const u8 *oob = nand->oob_poi + oob_off;
- ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, !i, page);
if (ret)
return ret;
}
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(nand);
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
}
-static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
const u8 *buf,
int oob_required,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct scatterlist sg;
int ret, i;
+ sunxi_nfc_select_chip(nand, nand->cur_cs);
+
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
- ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+ ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
DMA_TO_DEVICE, &sg);
if (ret)
goto pio_fallback;
@@ -1383,14 +1270,14 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
for (i = 0; i < ecc->steps; i++) {
const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
- sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
}
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
- sunxi_nfc_hw_ecc_enable(mtd);
- sunxi_nfc_randomizer_config(mtd, page, false);
- sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_hw_ecc_enable(nand);
+ sunxi_nfc_randomizer_config(nand, page, false);
+ sunxi_nfc_randomizer_enable(nand);
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
nfc->regs + NFC_REG_WCMD_SET);
@@ -1405,46 +1292,46 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
if (ret)
dmaengine_terminate_all(nfc->dmac);
- sunxi_nfc_randomizer_disable(mtd);
- sunxi_nfc_hw_ecc_disable(mtd);
+ sunxi_nfc_randomizer_disable(nand);
+ sunxi_nfc_hw_ecc_disable(nand);
- sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+ sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);
if (ret)
return ret;
- if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
/* TODO: use DMA to transfer extra OOB bytes ? */
- sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
NULL, page);
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
pio_fallback:
- return sunxi_nfc_hw_ecc_write_page(chip, buf, oob_required, page);
+ return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
}
-static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *chip, int page)
+static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
- chip->pagebuf = -1;
+ nand->pagebuf = -1;
- return chip->ecc.read_page(chip, chip->data_buf, 1, page);
+ return nand->ecc.read_page(nand, nand->data_buf, 1, page);
}
-static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *chip, int page)
+static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_info *mtd = nand_to_mtd(nand);
int ret;
- chip->pagebuf = -1;
+ nand->pagebuf = -1;
- memset(chip->data_buf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(chip, chip->data_buf, 1, page);
+ memset(nand->data_buf, 0xff, mtd->writesize);
+ ret = nand->ecc.write_page(nand, nand->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- return nand_prog_page_end_op(chip);
+ return nand_prog_page_end_op(nand);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1471,8 +1358,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
const struct nand_data_interface *conf)
{
- struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
const struct nand_sdr_timings *timings;
u32 min_clk_period = 0;
s32 tWB, tADL, tWHR, tRHW, tCAD;
@@ -1555,6 +1442,20 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
if (timings->tRHW_min > (min_clk_period * 20))
min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+ /*
+ * In non-EDO, tREA should be less than tRP to guarantee that the
+ * controller does not sample the IO lines too early. Unfortunately,
+ * the sunxi NAND controller does not allow us to have different
+ * values for tRP and tREH (tRP = tREH = tRW / 2).
+ *
+ * We have 2 options to overcome this limitation:
+ *
+ * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
+ * 2/ Use EDO mode (only works if timings->tRLOH > 0)
+ */
+ if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
+ min_clk_period = timings->tREA_max;
+
tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
min_clk_period);
if (tWB < 0) {
@@ -1591,7 +1492,7 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
tCAD = 0x7;
/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
- chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+ sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
/* Convert min_clk_period from picoseconds to nanoseconds */
min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
@@ -1602,21 +1503,24 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
* This new formula was verified with a scope and validated by
* Allwinner engineers.
*/
- chip->clk_rate = NSEC_PER_SEC / min_clk_period;
- real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+ sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
+ real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
if (real_clk_rate <= 0) {
- dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+ dev_err(nfc->dev, "Unable to round clk %lu\n",
+ sunxi_nand->clk_rate);
return -EINVAL;
}
+ sunxi_nand->timing_ctl = 0;
+
/*
* ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
* output cycle timings shall be used if the host drives tRC less than
- * 30 ns.
+ * 30 ns. We should also use EDO mode if tREA is bigger than tRP.
*/
min_clk_period = NSEC_PER_SEC / real_clk_rate;
- chip->timing_ctl = ((min_clk_period * 2) < 30) ?
- NFC_TIMING_CTL_EDO : 0;
+ if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
+ sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;
return 0;
}
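To make the timing derivation above concrete, here is a minimal standalone sketch of the same arithmetic, assuming an SDR mode with tREA_max = 40 ns (40000 ps), tRLOH_min = 0 and a base period of 25000 ps from the other constraints; it ignores clk_round_rate() and is not part of the patch:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned long tREA_max = 40000;		/* assumed, in picoseconds */
	unsigned long tRLOH_min = 0;		/* assumed: EDO not usable */
	unsigned long min_clk_period = 25000;	/* ps, from the other timings */
	unsigned long period_ns, clk_rate;

	/* Option 1/: stretch tRC so that tREA <= tRC / 2 */
	if (tREA_max > min_clk_period && !tRLOH_min)
		min_clk_period = tREA_max;

	/* picoseconds -> nanoseconds (round up), then period -> rate */
	period_ns = (min_clk_period + 999) / 1000;
	clk_rate = NSEC_PER_SEC / period_ns;
	printf("requested mod_clk: %lu Hz\n", clk_rate);	/* 25 MHz here */

	/* EDO is selected when tRC < 30 ns or tREA > tRP (= tRC / 2) */
	if (period_ns * 2 < 30 || period_ns * 1000 < tREA_max)
		printf("EDO mode\n");
	else
		printf("non-EDO mode\n");	/* the case for these values */
	return 0;
}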
@@ -1677,14 +1581,13 @@ static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
kfree(ecc->priv);
}
-static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
- struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct sunxi_nand_hw_ecc *data;
int nsectors;
int ret;
@@ -1808,7 +1711,6 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
- struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct device_node *np = nand_get_flash_node(nand);
int ret;
@@ -1831,7 +1733,7 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
switch (ecc->mode) {
case NAND_ECC_HW:
- ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
+ ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
if (ret)
return ret;
break;
@@ -1845,15 +1747,165 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
return 0;
}
+static int sunxi_nfc_exec_subop(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
+ unsigned int i, j, remaining, start;
+ void *inbuf = NULL;
+ int ret;
+
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (cmd & NFC_SEND_CMD1) {
+ if (WARN_ON(cmd & NFC_SEND_CMD2))
+ return -EINVAL;
+
+ cmd |= NFC_SEND_CMD2;
+ extcmd |= instr->ctx.cmd.opcode;
+ } else {
+ cmd |= NFC_SEND_CMD1 |
+ NFC_CMD(instr->ctx.cmd.opcode);
+ }
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ remaining = nand_subop_get_num_addr_cyc(subop, i);
+ start = nand_subop_get_addr_start_off(subop, i);
+ for (j = 0; j < 8 && j + start < remaining; j++) {
+ u32 addr = instr->ctx.addr.addrs[j + start];
+
+ addrs[j / 4] |= addr << (j % 4) * 8;
+ }
+
+ if (j)
+ cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);
+
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ start = nand_subop_get_data_start_off(subop, i);
+ remaining = nand_subop_get_data_len(subop, i);
+ cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
+ cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+
+ if (instr->type == NAND_OP_DATA_OUT_INSTR) {
+ cmd |= NFC_ACCESS_DIR;
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE,
+ instr->ctx.data.buf.out + start,
+ cnt);
+ } else {
+ inbuf = instr->ctx.data.buf.in + start;
+ }
+
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ cmd |= NFC_WAIT_FLAG;
+ break;
+ }
+ }
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ if (cmd & NFC_SEND_ADR) {
+ writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
+ writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
+ }
+
+ if (cmd & NFC_SEND_CMD2)
+ writel(extcmd,
+ nfc->regs +
+ (cmd & NFC_ACCESS_DIR ?
+ NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));
+
+ if (cmd & NFC_DATA_TRANS)
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+
+ writel(cmd, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
+ !(cmd & NFC_WAIT_FLAG) && cnt < 64,
+ 0);
+ if (ret)
+ return ret;
+
+ if (inbuf)
+ memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);
+
+ return 0;
+}
+
+static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
+ const struct nand_subop *subop)
+{
+ return nand_soft_waitrdy(nand,
+ subop->instrs[0].ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+);
+
+static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
+
+static int sunxi_nfc_exec_op(struct nand_chip *nand,
+ const struct nand_operation *op, bool check_only)
+{
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ const struct nand_op_parser *parser;
+
+ sunxi_nfc_select_chip(nand, op->cs);
+
+ if (sunxi_nand->sels[op->cs].rb >= 0)
+ parser = &sunxi_nfc_op_parser;
+ else
+ parser = &sunxi_nfc_norb_op_parser;
+
+ return nand_op_parser_exec_op(nand, parser, op, check_only);
+}
+
static const struct nand_controller_ops sunxi_nand_controller_ops = {
.attach_chip = sunxi_nand_attach_chip,
.setup_data_interface = sunxi_nfc_setup_data_interface,
+ .exec_op = sunxi_nfc_exec_op,
};
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
struct device_node *np)
{
- struct sunxi_nand_chip *chip;
+ struct sunxi_nand_chip *sunxi_nand;
struct mtd_info *mtd;
struct nand_chip *nand;
int nsels;
@@ -1870,17 +1922,14 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
return -EINVAL;
}
- chip = devm_kzalloc(dev,
- sizeof(*chip) +
- (nsels * sizeof(struct sunxi_nand_chip_sel)),
- GFP_KERNEL);
- if (!chip) {
+ sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
+ GFP_KERNEL);
+ if (!sunxi_nand) {
dev_err(dev, "could not allocate chip\n");
return -ENOMEM;
}
- chip->nsels = nsels;
- chip->selected = -1;
+ sunxi_nand->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &tmp);
@@ -1902,18 +1951,17 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
return -EINVAL;
}
- chip->sels[i].cs = tmp;
+ sunxi_nand->sels[i].cs = tmp;
if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
tmp < 2)
- chip->sels[i].rb = tmp;
+ sunxi_nand->sels[i].rb = tmp;
else
- chip->sels[i].rb = -1;
+ sunxi_nand->sels[i].rb = -1;
}
- nand = &chip->nand;
+ nand = &sunxi_nand->nand;
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
- nand->legacy.chip_delay = 200;
nand->controller = &nfc->controller;
nand->controller->ops = &sunxi_nand_controller_ops;
@@ -1923,11 +1971,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
*/
nand->ecc.mode = NAND_ECC_HW;
nand_set_flash_node(nand, np);
- nand->legacy.select_chip = sunxi_nfc_select_chip;
- nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl;
- nand->legacy.read_buf = sunxi_nfc_read_buf;
- nand->legacy.write_buf = sunxi_nfc_write_buf;
- nand->legacy.read_byte = sunxi_nfc_read_byte;
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
@@ -1943,7 +1986,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
return ret;
}
- list_add_tail(&chip->node, &nfc->chips);
+ list_add_tail(&sunxi_nand->node, &nfc->chips);
return 0;
}
@@ -1973,14 +2016,15 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
- struct sunxi_nand_chip *chip;
+ struct sunxi_nand_chip *sunxi_nand;
while (!list_empty(&nfc->chips)) {
- chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
- node);
- nand_release(&chip->nand);
- sunxi_nand_ecc_cleanup(&chip->nand.ecc);
- list_del(&chip->node);
+ sunxi_nand = list_first_entry(&nfc->chips,
+ struct sunxi_nand_chip,
+ node);
+ nand_release(&sunxi_nand->nand);
+ sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc);
+ list_del(&sunxi_nand->node);
}
}
@@ -2124,7 +2168,7 @@ static struct platform_driver sunxi_nfc_driver = {
};
module_platform_driver(sunxi_nfc_driver);
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");
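The address packing in sunxi_nfc_exec_subop() above folds up to eight address cycles into the two 32-bit ADDR registers. A small standalone sketch of that bit math, using a made-up 5-cycle address (two column plus three row cycles) purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 5-cycle address: column 0x0123, row 0x040506 */
	unsigned char cycles[5] = { 0x23, 0x01, 0x06, 0x05, 0x04 };
	unsigned int addrs[2] = { 0, 0 };
	int j;

	/*
	 * Same packing as the driver: cycle j lands in addrs[j / 4],
	 * shifted left by (j % 4) * 8 bits.
	 */
	for (j = 0; j < 5; j++)
		addrs[j / 4] |= (unsigned int)cycles[j] << (j % 4) * 8;

	printf("ADDR_LOW  = 0x%08x\n", addrs[0]);	/* 0x05060123 */
	printf("ADDR_HIGH = 0x%08x\n", addrs[1]);	/* 0x00000004 */
	return 0;
}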
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index f3b59e649b7d..db030f1701ee 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -104,6 +104,7 @@
struct tmio_nand {
struct nand_chip chip;
+ struct completion comp;
struct platform_device *dev;
@@ -168,15 +169,11 @@ static int tmio_nand_dev_ready(struct nand_chip *chip)
static irqreturn_t tmio_irq(int irq, void *__tmio)
{
struct tmio_nand *tmio = __tmio;
- struct nand_chip *nand_chip = &tmio->chip;
/* disable RDYREQ interrupt */
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+ complete(&tmio->comp);
- if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
- dev_warn(&tmio->dev->dev, "spurious interrupt\n");
-
- wake_up(&nand_chip->controller->wq);
return IRQ_HANDLED;
}
@@ -193,18 +190,18 @@ static int tmio_nand_wait(struct nand_chip *nand_chip)
u8 status;
/* enable RDYREQ interrupt */
+
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+ reinit_completion(&tmio->comp);
tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
- timeout = wait_event_timeout(nand_chip->controller->wq,
- tmio_nand_dev_ready(nand_chip),
- msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
+ timeout = 400;
+ timeout = wait_for_completion_timeout(&tmio->comp,
+ msecs_to_jiffies(timeout));
if (unlikely(!tmio_nand_dev_ready(nand_chip))) {
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
- dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
- nand_chip->state == FL_ERASING ? "erase" : "program",
- nand_chip->state == FL_ERASING ? 400 : 20);
+ dev_warn(&tmio->dev->dev, "still busy after 400 ms\n");
} else if (unlikely(!timeout)) {
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
@@ -378,6 +375,8 @@ static int tmio_probe(struct platform_device *dev)
if (!tmio)
return -ENOMEM;
+ init_completion(&tmio->comp);
+
tmio->dev = dev;
platform_set_drvdata(dev, tmio);
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e4141c20947a..0b49d8264bef 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -12,6 +12,8 @@
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
+#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -81,11 +83,83 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 1 byte for the BBM. */
+ region->offset = 1;
+ region->length = 63;
+
+ return 0;
+}
+
+static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4UEXXG_REG_STATUS2,
+ &status2);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /*
+ * Read status2 register to determine a more fine-grained
+ * bit error status
+ */
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * 4 ... 7 bits are flipped (1..4 can't be detected, so
+ * report the maximum of 4 in this case)
+ */
+ /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ return ((status & STATUS_ECC_MASK) >> 2) |
+ ((status2 & STATUS_ECC_MASK) >> 4);
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
.ecc = gd5fxgq4xa_ooblayout_ecc,
.free = gd5fxgq4xa_ooblayout_free,
};
+static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = {
+ .ecc = gd5fxgq4uexxg_ooblayout_ecc,
+ .free = gd5fxgq4uexxg_ooblayout_free,
+};
+
static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4xA", 0xF1,
NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
@@ -114,6 +188,15 @@ static const struct spinand_info gigadevice_spinand_table[] = {
0,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
};
static int gigadevice_spinand_detect(struct spinand_device *spinand)
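The return expression in gd5fxgq4uexxg_ecc_get_status() above merges the coarse count from the status register with the finer count read from STATUS2. Assuming STATUS_ECC_MASK is the two-bit field at bits 5:4 (as the (1 << 4) and (3 << 4) definitions suggest), a standalone sketch of that merge for a device reporting 6 flipped bits:

#include <stdio.h>

#define STATUS_ECC_MASK 0x30	/* assumed: two-bit ECC field at bits 5:4 */

int main(void)
{
	/* Example: status says "1-7 bitflips" (01b), STATUS2 refines it */
	unsigned char status  = 0x10;	/* ECCS  = 01b -> base count of 4 */
	unsigned char status2 = 0x20;	/* ECCSE = 10b -> adds 2 */
	unsigned int bitflips;

	bitflips = ((status & STATUS_ECC_MASK) >> 2) |
		   ((status2 & STATUS_ECC_MASK) >> 4);
	printf("reported bitflips: %u\n", bitflips);	/* 6 */
	return 0;
}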
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 98f6b9c4b684..d16b57081c95 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -10,6 +10,7 @@
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
+#define MACRONIX_ECCSR_MASK 0x0F
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
SPI_MEM_OP_DUMMY(1, 1),
SPI_MEM_OP_DATA_IN(1, eccsr, 1));
- return spi_mem_exec_op(spinand->spimem, &op);
+ int ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ *eccsr &= MACRONIX_ECCSR_MASK;
+ return 0;
}
static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 081265557e70..db8021da45b5 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -25,19 +25,19 @@ static SPINAND_OP_VARIANTS(write_cache_variants,
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
-static int tc58cvg2s0h_ooblayout_ecc(struct mtd_info *mtd, int section,
+static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
- if (section > 7)
+ if (section > 0)
return -ERANGE;
- region->offset = 128 + 16 * section;
- region->length = 16;
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
return 0;
}
-static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section,
+static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 0)
@@ -45,17 +45,17 @@ static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section,
/* 2 bytes reserved for BBM */
region->offset = 2;
- region->length = 126;
+ region->length = (mtd->oobsize / 2) - 2;
return 0;
}
-static const struct mtd_ooblayout_ops tc58cvg2s0h_ooblayout = {
- .ecc = tc58cvg2s0h_ooblayout_ecc,
- .free = tc58cvg2s0h_ooblayout_free,
+static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = {
+ .ecc = tc58cxgxsx_ooblayout_ecc,
+ .free = tc58cxgxsx_ooblayout_free,
};
-static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand,
+static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
@@ -94,15 +94,66 @@ static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand,
}
static const struct spinand_info toshiba_spinand_table[] = {
- SPINAND_INFO("TC58CVG2S0H", 0xCD,
+ /* 3.3V 1Gb */
+ SPINAND_INFO("TC58CVG0S3", 0xC2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 3.3V 2Gb */
+ SPINAND_INFO("TC58CVG1S3", 0xCB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 3.3V 4Gb */
+ SPINAND_INFO("TC58CVG2S0", 0xCD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 1.8V 1Gb */
+ SPINAND_INFO("TC58CYG0S3", 0xB2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 1.8V 2Gb */
+ SPINAND_INFO("TC58CYG1S3", 0xBB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
+ /* 1.8V 4Gb */
+ SPINAND_INFO("TC58CYG2S0", 0xBD,
NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&tc58cvg2s0h_ooblayout,
- tc58cvg2s0h_ecc_get_status)),
+ 0,
+ SPINAND_ECCINFO(&tc58cxgxsx_ooblayout,
+ tc58cxgxsx_ecc_get_status)),
};
static int toshiba_spinand_detect(struct spinand_device *spinand)
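With the generic layout above, the OOB split scales with the device's actual OOB size instead of being hard-coded for the 4Gb part. A quick sketch of the regions it yields for the 128- and 256-byte OOB sizes in the table, derived only from the two ooblayout callbacks above:

#include <stdio.h>

static void show_layout(unsigned int oobsize)
{
	/* ECC region: the second half of the OOB area */
	unsigned int ecc_off  = oobsize / 2;
	unsigned int ecc_len  = oobsize / 2;
	/* Free region: the first half, minus 2 bytes kept for the BBM */
	unsigned int free_off = 2;
	unsigned int free_len = oobsize / 2 - 2;

	printf("oobsize %3u: ecc %u+%u, free %u+%u\n",
	       oobsize, ecc_off, ecc_len, free_off, free_len);
}

int main(void)
{
	show_layout(128);	/* 1Gb/2Gb parts: ecc 64+64, free 2+62 */
	show_layout(256);	/* 4Gb parts: ecc 128+128, free 2+126 */
	return 0;
}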
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 44fe8018733c..dab986691267 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -7,14 +7,6 @@ menuconfig MTD_SPI_NOR
if MTD_SPI_NOR
-config MTD_MT81xx_NOR
- tristate "Mediatek MT81xx SPI NOR flash controller"
- depends on HAS_IOMEM
- help
- This enables access to SPI NOR flash, using MT81xx SPI NOR flash
- controller. This controller does not support generic SPI BUS, it only
- supports SPI NOR Flash.
-
config MTD_SPI_NOR_USE_4K_SECTORS
bool "Use small 4096 B erase sectors"
default y
@@ -50,15 +42,6 @@ config SPI_CADENCE_QUADSPI
device with a Cadence QSPI controller and want to access the
Flash as an MTD device.
-config SPI_FSL_QUADSPI
- tristate "Freescale Quad SPI controller"
- depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables support for the Quad SPI controller in master mode.
- This controller does not support generic SPI. It only supports
- SPI NOR.
-
config SPI_HISI_SFC
tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST
@@ -66,6 +49,14 @@ config SPI_HISI_SFC
help
This enables support for hisilicon SPI-NOR flash controller.
+config SPI_MTK_QUADSPI
+ tristate "MediaTek Quad SPI controller"
+ depends on HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This controller does not support generic SPI. It only supports
+ SPI NOR.
+
config SPI_NXP_SPIFI
tristate "NXP SPI Flash Interface (SPIFI)"
depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index a552efd22958..189a15cca3ec 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -2,9 +2,8 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
-obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
-obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
+obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 04cedd3a2bf6..792628750eec 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -44,6 +44,12 @@
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
+/* Capabilities mask */
+#define CQSPI_BASE_HWCAPS_MASK \
+ (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \
+ SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \
+ SNOR_HWCAPS_PP)
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -93,6 +99,11 @@ struct cqspi_st {
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
+struct cqspi_driver_platdata {
+ u32 hwcaps_mask;
+ u8 quirks;
+};
+
/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
@@ -101,6 +112,7 @@ struct cqspi_st {
#define CQSPI_INST_TYPE_SINGLE 0
#define CQSPI_INST_TYPE_DUAL 1
#define CQSPI_INST_TYPE_QUAD 2
+#define CQSPI_INST_TYPE_OCTAL 3
#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
@@ -418,9 +430,10 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
void __iomem *reg_base = cqspi->iobase;
unsigned int reg;
unsigned int data;
+ u32 write_len;
int ret;
- if (n_tx > 4 || (n_tx && !txbuf)) {
+ if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
dev_err(nor->dev,
"Invalid input argument, cmdlen %d txbuf 0x%p\n",
n_tx, txbuf);
@@ -433,10 +446,18 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
data = 0;
- memcpy(&data, txbuf, n_tx);
+ write_len = (n_tx > 4) ? 4 : n_tx;
+ memcpy(&data, txbuf, write_len);
+ txbuf += write_len;
writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
- }
+ if (n_tx > 4) {
+ data = 0;
+ write_len = n_tx - 4;
+ memcpy(&data, txbuf, write_len);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
+ }
+ }
ret = cqspi_exec_flash_cmd(cqspi, reg);
return ret;
}
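The rework of cqspi_command_write() above lets STIG writes carry up to CQSPI_STIG_DATA_LEN_MAX bytes by splitting the payload across the LOWER and UPPER data registers. A host-side sketch of that split, assuming an 8-byte maximum and using plain variables in place of the MMIO writes:

#include <stdio.h>
#include <string.h>

#define CQSPI_STIG_DATA_LEN_MAX 8	/* assumed value, for illustration only */

int main(void)
{
	const unsigned char txbuf[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	unsigned int n_tx = sizeof(txbuf), write_len;
	unsigned int lower = 0, upper = 0;	/* stand-ins for the two registers */
	const unsigned char *p = txbuf;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX)
		return 1;

	/* The first (up to) 4 bytes go to CMDWRITEDATALOWER ... */
	write_len = n_tx > 4 ? 4 : n_tx;
	memcpy(&lower, p, write_len);
	p += write_len;

	/* ... and the remainder, if any, to CMDWRITEDATAUPPER */
	if (n_tx > 4) {
		write_len = n_tx - 4;
		memcpy(&upper, p, write_len);
	}

	printf("LOWER = 0x%08x, UPPER = 0x%08x\n", lower, upper);
	return 0;
}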
@@ -911,6 +932,9 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
case SNOR_PROTO_1_1_4:
f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
break;
+ case SNOR_PROTO_1_1_8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
default:
return -EINVAL;
}
@@ -1213,21 +1237,23 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_READ_1_1_2 |
- SNOR_HWCAPS_READ_1_1_4 |
- SNOR_HWCAPS_PP,
- };
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
+ const struct cqspi_driver_platdata *ddata;
+ struct spi_nor_hwcaps hwcaps;
struct cqspi_flash_pdata *f_pdata;
struct spi_nor *nor;
struct mtd_info *mtd;
unsigned int cs;
int i, ret;
+ ddata = of_device_get_match_data(dev);
+ if (!ddata) {
+ dev_err(dev, "Couldn't find driver data\n");
+ return -EINVAL;
+ }
+ hwcaps.mask = ddata->hwcaps_mask;
+
/* Get flash device data */
for_each_available_child_of_node(dev->of_node, np) {
ret = of_property_read_u32(np, "reg", &cs);
@@ -1310,7 +1336,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
- unsigned long data;
+ const struct cqspi_driver_platdata *ddata;
int ret;
int irq;
@@ -1377,8 +1403,8 @@ static int cqspi_probe(struct platform_device *pdev)
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
- data = (unsigned long)of_device_get_match_data(dev);
- if (data & CQSPI_NEEDS_WR_DELAY)
+ ddata = of_device_get_match_data(dev);
+ if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
@@ -1460,14 +1486,32 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#define CQSPI_DEV_PM_OPS NULL
#endif
+static const struct cqspi_driver_platdata cdns_qspi = {
+ .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+};
+
+static const struct cqspi_driver_platdata k2g_qspi = {
+ .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+ .quirks = CQSPI_NEEDS_WR_DELAY,
+};
+
+static const struct cqspi_driver_platdata am654_ospi = {
+ .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8,
+ .quirks = CQSPI_NEEDS_WR_DELAY,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
- .data = (void *)0,
+ .data = &cdns_qspi,
},
{
.compatible = "ti,k2g-qspi",
- .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ .data = &k2g_qspi,
+ },
+ {
+ .compatible = "ti,am654-ospi",
+ .data = &am654_ospi,
},
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
deleted file mode 100644
index 1ff3430f82c8..000000000000
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ /dev/null
@@ -1,1224 +0,0 @@
-/*
- * Freescale QuadSPI driver.
- *
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/completion.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-#include <linux/mutex.h>
-#include <linux/pm_qos.h>
-#include <linux/sizes.h>
-
-/* Controller needs driver to swap endian */
-#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0)
-/* Controller needs 4x internal clock */
-#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1)
-/*
- * TKT253890: the controller needs the driver to fill the TX FIFO up to
- * 16 bytes to trigger a data transfer, even though the external data
- * will not be transferred.
- */
-#define QUADSPI_QUIRK_TKT253890 (1 << 2)
-/* Controller cannot wake up from wait mode, TKT245618 */
-#define QUADSPI_QUIRK_TKT245618 (1 << 3)
-
-/* The registers */
-#define QUADSPI_MCR 0x00
-#define QUADSPI_MCR_RESERVED_SHIFT 16
-#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT)
-#define QUADSPI_MCR_MDIS_SHIFT 14
-#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT)
-#define QUADSPI_MCR_CLR_TXF_SHIFT 11
-#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT)
-#define QUADSPI_MCR_CLR_RXF_SHIFT 10
-#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT)
-#define QUADSPI_MCR_DDR_EN_SHIFT 7
-#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT)
-#define QUADSPI_MCR_END_CFG_SHIFT 2
-#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT)
-#define QUADSPI_MCR_SWRSTHD_SHIFT 1
-#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT)
-#define QUADSPI_MCR_SWRSTSD_SHIFT 0
-#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT)
-
-#define QUADSPI_IPCR 0x08
-#define QUADSPI_IPCR_SEQID_SHIFT 24
-#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT)
-
-#define QUADSPI_BUF0CR 0x10
-#define QUADSPI_BUF1CR 0x14
-#define QUADSPI_BUF2CR 0x18
-#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe
-
-#define QUADSPI_BUF3CR 0x1c
-#define QUADSPI_BUF3CR_ALLMST_SHIFT 31
-#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT)
-#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8
-#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT)
-
-#define QUADSPI_BFGENCR 0x20
-#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16
-#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT))
-#define QUADSPI_BFGENCR_SEQID_SHIFT 12
-#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT)
-
-#define QUADSPI_BUF0IND 0x30
-#define QUADSPI_BUF1IND 0x34
-#define QUADSPI_BUF2IND 0x38
-#define QUADSPI_SFAR 0x100
-
-#define QUADSPI_SMPR 0x108
-#define QUADSPI_SMPR_DDRSMP_SHIFT 16
-#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT)
-#define QUADSPI_SMPR_FSDLY_SHIFT 6
-#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT)
-#define QUADSPI_SMPR_FSPHS_SHIFT 5
-#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT)
-#define QUADSPI_SMPR_HSENA_SHIFT 0
-#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT)
-
-#define QUADSPI_RBSR 0x10c
-#define QUADSPI_RBSR_RDBFL_SHIFT 8
-#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT)
-
-#define QUADSPI_RBCT 0x110
-#define QUADSPI_RBCT_WMRK_MASK 0x1F
-#define QUADSPI_RBCT_RXBRD_SHIFT 8
-#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT)
-
-#define QUADSPI_TBSR 0x150
-#define QUADSPI_TBDR 0x154
-#define QUADSPI_SR 0x15c
-#define QUADSPI_SR_IP_ACC_SHIFT 1
-#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT)
-#define QUADSPI_SR_AHB_ACC_SHIFT 2
-#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT)
-
-#define QUADSPI_FR 0x160
-#define QUADSPI_FR_TFF_MASK 0x1
-
-#define QUADSPI_SFA1AD 0x180
-#define QUADSPI_SFA2AD 0x184
-#define QUADSPI_SFB1AD 0x188
-#define QUADSPI_SFB2AD 0x18c
-#define QUADSPI_RBDR 0x200
-
-#define QUADSPI_LUTKEY 0x300
-#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
-
-#define QUADSPI_LCKCR 0x304
-#define QUADSPI_LCKER_LOCK 0x1
-#define QUADSPI_LCKER_UNLOCK 0x2
-
-#define QUADSPI_RSER 0x164
-#define QUADSPI_RSER_TFIE (0x1 << 0)
-
-#define QUADSPI_LUT_BASE 0x310
-
-/*
- * The definition of the LUT register is shown below:
- *
- * ---------------------------------------------------
- * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
- * ---------------------------------------------------
- */
-#define OPRND0_SHIFT 0
-#define PAD0_SHIFT 8
-#define INSTR0_SHIFT 10
-#define OPRND1_SHIFT 16
-
-/* Instruction set for the LUT register. */
-#define LUT_STOP 0
-#define LUT_CMD 1
-#define LUT_ADDR 2
-#define LUT_DUMMY 3
-#define LUT_MODE 4
-#define LUT_MODE2 5
-#define LUT_MODE4 6
-#define LUT_FSL_READ 7
-#define LUT_FSL_WRITE 8
-#define LUT_JMP_ON_CS 9
-#define LUT_ADDR_DDR 10
-#define LUT_MODE_DDR 11
-#define LUT_MODE2_DDR 12
-#define LUT_MODE4_DDR 13
-#define LUT_FSL_READ_DDR 14
-#define LUT_FSL_WRITE_DDR 15
-#define LUT_DATA_LEARN 16
-
-/*
- * The PAD definitions for LUT register.
- *
- * The pad stands for the number of IO lines, IO[0:3].
- * For example, a Quad read needs four IO lines, so you should
- * set LUT_PAD4, which means we use four IO lines.
- */
-#define LUT_PAD1 0
-#define LUT_PAD2 1
-#define LUT_PAD4 2
-
-/* Operands for the LUT register. */
-#define ADDR24BIT 0x18
-#define ADDR32BIT 0x20
-
-/* Macros for constructing the LUT register. */
-#define LUT0(ins, pad, opr) \
- (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
- ((LUT_##ins) << INSTR0_SHIFT))
-
-#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
-
-/* other macros for LUT register. */
-#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4)
-#define QUADSPI_LUT_NUM 64
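As a worked example of the LUT0/LUT1 macros above, using 0x6b (the 1-1-4 fast-read opcode) as an example command and a 24-bit address:

	LUT0(CMD, PAD1, 0x6b)       = 0x6b | (LUT_PAD1 << 8) | (LUT_CMD << 10)          = 0x0000046b
	LUT1(ADDR, PAD1, ADDR24BIT) = (0x18 | (LUT_PAD1 << 8) | (LUT_ADDR << 10)) << 16 = 0x08180000

so the first word of such a read sequence would be 0x0818046b. (The driver itself uses nor->read_opcode, which may differ per flash.)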
-
-/* SEQID -- we can have 16 seqids at most. */
-#define SEQID_READ 0
-#define SEQID_WREN 1
-#define SEQID_WRDI 2
-#define SEQID_RDSR 3
-#define SEQID_SE 4
-#define SEQID_CHIP_ERASE 5
-#define SEQID_PP 6
-#define SEQID_RDID 7
-#define SEQID_WRSR 8
-#define SEQID_RDCR 9
-#define SEQID_EN4B 10
-#define SEQID_BRWR 11
-
-#define QUADSPI_MIN_IOMAP SZ_4M
-
-enum fsl_qspi_devtype {
- FSL_QUADSPI_VYBRID,
- FSL_QUADSPI_IMX6SX,
- FSL_QUADSPI_IMX7D,
- FSL_QUADSPI_IMX6UL,
- FSL_QUADSPI_LS1021A,
- FSL_QUADSPI_LS2080A,
-};
-
-struct fsl_qspi_devtype_data {
- enum fsl_qspi_devtype devtype;
- int rxfifo;
- int txfifo;
- int ahb_buf_size;
- int driver_data;
-};
-
-static const struct fsl_qspi_devtype_data vybrid_data = {
- .devtype = FSL_QUADSPI_VYBRID,
- .rxfifo = 128,
- .txfifo = 64,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
-};
-
-static const struct fsl_qspi_devtype_data imx6sx_data = {
- .devtype = FSL_QUADSPI_IMX6SX,
- .rxfifo = 128,
- .txfifo = 512,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_4X_INT_CLK
- | QUADSPI_QUIRK_TKT245618,
-};
-
-static const struct fsl_qspi_devtype_data imx7d_data = {
- .devtype = FSL_QUADSPI_IMX7D,
- .rxfifo = 512,
- .txfifo = 512,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_TKT253890
- | QUADSPI_QUIRK_4X_INT_CLK,
-};
-
-static const struct fsl_qspi_devtype_data imx6ul_data = {
- .devtype = FSL_QUADSPI_IMX6UL,
- .rxfifo = 128,
- .txfifo = 512,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_TKT253890
- | QUADSPI_QUIRK_4X_INT_CLK,
-};
-
-static struct fsl_qspi_devtype_data ls1021a_data = {
- .devtype = FSL_QUADSPI_LS1021A,
- .rxfifo = 128,
- .txfifo = 64,
- .ahb_buf_size = 1024,
- .driver_data = 0,
-};
-
-static const struct fsl_qspi_devtype_data ls2080a_data = {
- .devtype = FSL_QUADSPI_LS2080A,
- .rxfifo = 128,
- .txfifo = 64,
- .ahb_buf_size = 1024,
- .driver_data = QUADSPI_QUIRK_TKT253890,
-};
-
-
-#define FSL_QSPI_MAX_CHIP 4
-struct fsl_qspi {
- struct spi_nor nor[FSL_QSPI_MAX_CHIP];
- void __iomem *iobase;
- void __iomem *ahb_addr;
- u32 memmap_phy;
- u32 memmap_offs;
- u32 memmap_len;
- struct clk *clk, *clk_en;
- struct device *dev;
- struct completion c;
- const struct fsl_qspi_devtype_data *devtype_data;
- u32 nor_size;
- u32 nor_num;
- u32 clk_rate;
- unsigned int chip_base_addr; /* We may support two chips. */
- bool has_second_chip;
- bool big_endian;
- struct mutex lock;
- struct pm_qos_request pm_qos_req;
-};
-
-static inline int needs_swap_endian(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
-}
-
-static inline int needs_4x_clock(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
-}
-
-static inline int needs_fill_txfifo(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
-}
-
-static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
-{
- return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
-}
-
-/*
- * R/W functions for big- or little-endian registers:
- * The qSPI controller's endianness is independent of the CPU core's.
- * Although the CPU core is little-endian, the qSPI block comes in two
- * versions: big-endian and little-endian.
- */
-static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
-{
- if (q->big_endian)
- iowrite32be(val, addr);
- else
- iowrite32(val, addr);
-}
-
-static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
-{
- if (q->big_endian)
- return ioread32be(addr);
- else
- return ioread32(addr);
-}
-
-/*
- * An IC bug makes us re-arrange the 32-bit data.
- * The following chips, such as IMX6SLX, have fixed this bug.
- */
-static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
-{
- return needs_swap_endian(q) ? __swab32(a) : a;
-}
-
-static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
-{
- qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
- qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
-}
-
-static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
-{
- qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
- qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
-}
-
-static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
-{
- struct fsl_qspi *q = dev_id;
- u32 reg;
-
- /* clear interrupt */
- reg = qspi_readl(q, q->iobase + QUADSPI_FR);
- qspi_writel(q, reg, q->iobase + QUADSPI_FR);
-
- if (reg & QUADSPI_FR_TFF_MASK)
- complete(&q->c);
-
- dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg);
- return IRQ_HANDLED;
-}
-
-static void fsl_qspi_init_lut(struct fsl_qspi *q)
-{
- void __iomem *base = q->iobase;
- int rxfifo = q->devtype_data->rxfifo;
- u32 lut_base;
- int i;
-
- struct spi_nor *nor = &q->nor[0];
- u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
- u8 read_op = nor->read_opcode;
- u8 read_dm = nor->read_dummy;
-
- fsl_qspi_unlock_lut(q);
-
- /* Clear all the LUT table */
- for (i = 0; i < QUADSPI_LUT_NUM; i++)
- qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
-
- /* Read */
- lut_base = SEQID_READ * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
- qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) |
- LUT1(FSL_READ, PAD4, rxfifo),
- base + QUADSPI_LUT(lut_base + 1));
-
- /* Write enable */
- lut_base = SEQID_WREN * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
- base + QUADSPI_LUT(lut_base));
-
- /* Page Program */
- lut_base = SEQID_PP * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) |
- LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
- qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
- base + QUADSPI_LUT(lut_base + 1));
-
- /* Read Status */
- lut_base = SEQID_RDSR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
- LUT1(FSL_READ, PAD1, 0x1),
- base + QUADSPI_LUT(lut_base));
-
- /* Erase a sector */
- lut_base = SEQID_SE * 4;
-
- qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) |
- LUT1(ADDR, PAD1, addrlen),
- base + QUADSPI_LUT(lut_base));
-
- /* Erase the whole chip */
- lut_base = SEQID_CHIP_ERASE * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
- base + QUADSPI_LUT(lut_base));
-
- /* READ ID */
- lut_base = SEQID_RDID * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) |
- LUT1(FSL_READ, PAD1, 0x8),
- base + QUADSPI_LUT(lut_base));
-
- /* Write Register */
- lut_base = SEQID_WRSR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) |
- LUT1(FSL_WRITE, PAD1, 0x2),
- base + QUADSPI_LUT(lut_base));
-
- /* Read Configuration Register */
- lut_base = SEQID_RDCR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) |
- LUT1(FSL_READ, PAD1, 0x1),
- base + QUADSPI_LUT(lut_base));
-
- /* Write disable */
- lut_base = SEQID_WRDI * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI),
- base + QUADSPI_LUT(lut_base));
-
- /* Enter 4 Byte Mode (Micron) */
- lut_base = SEQID_EN4B * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B),
- base + QUADSPI_LUT(lut_base));
-
- /* Enter 4 Byte Mode (Spansion) */
- lut_base = SEQID_BRWR * 4;
- qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
- base + QUADSPI_LUT(lut_base));
-
- fsl_qspi_lock_lut(q);
-}
-
-/* Get the SEQID for the command */
-static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
-{
- switch (cmd) {
- case SPINOR_OP_READ_1_1_4:
- case SPINOR_OP_READ_1_1_4_4B:
- return SEQID_READ;
- case SPINOR_OP_WREN:
- return SEQID_WREN;
- case SPINOR_OP_WRDI:
- return SEQID_WRDI;
- case SPINOR_OP_RDSR:
- return SEQID_RDSR;
- case SPINOR_OP_SE:
- return SEQID_SE;
- case SPINOR_OP_CHIP_ERASE:
- return SEQID_CHIP_ERASE;
- case SPINOR_OP_PP:
- return SEQID_PP;
- case SPINOR_OP_RDID:
- return SEQID_RDID;
- case SPINOR_OP_WRSR:
- return SEQID_WRSR;
- case SPINOR_OP_RDCR:
- return SEQID_RDCR;
- case SPINOR_OP_EN4B:
- return SEQID_EN4B;
- case SPINOR_OP_BRWR:
- return SEQID_BRWR;
- default:
- if (cmd == q->nor[0].erase_opcode)
- return SEQID_SE;
- dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
- break;
- }
- return -EINVAL;
-}
-
-static int
-fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
-{
- void __iomem *base = q->iobase;
- int seqid;
- u32 reg, reg2;
- int err;
-
- init_completion(&q->c);
- dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
- q->chip_base_addr, addr, len, cmd);
-
- /* save the reg */
- reg = qspi_readl(q, base + QUADSPI_MCR);
-
- qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
- base + QUADSPI_SFAR);
- qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
- base + QUADSPI_RBCT);
- qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
-
- do {
- reg2 = qspi_readl(q, base + QUADSPI_SR);
- if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
- udelay(1);
- dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
- continue;
- }
- break;
- } while (1);
-
- /* trigger the LUT now */
- seqid = fsl_qspi_get_seqid(q, cmd);
- if (seqid < 0)
- return seqid;
-
- qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
- base + QUADSPI_IPCR);
-
- /* Wait for the interrupt. */
- if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
- dev_err(q->dev,
- "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
- cmd, addr, qspi_readl(q, base + QUADSPI_FR),
- qspi_readl(q, base + QUADSPI_SR));
- err = -ETIMEDOUT;
- } else {
- err = 0;
- }
-
- /* restore the MCR */
- qspi_writel(q, reg, base + QUADSPI_MCR);
-
- return err;
-}
-
-/* Read out the data from the QUADSPI_RBDR buffer registers. */
-static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
-{
- u32 tmp;
- int i = 0;
-
- while (len > 0) {
- tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4);
- tmp = fsl_qspi_endian_xchg(q, tmp);
- dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
- q->chip_base_addr, tmp);
-
- if (len >= 4) {
- *((u32 *)rxbuf) = tmp;
- rxbuf += 4;
- } else {
- memcpy(rxbuf, &tmp, len);
- break;
- }
-
- len -= 4;
- i++;
- }
-}
-
-/*
- * If we have changed the content of the flash by writing or erasing,
- * we need to invalidate the AHB buffer. If we do not do so, we may read out
- * the wrong data. The spec tells us to reset the AHB domain and Serial
- * Flash domain at the same time.
- */
-static inline void fsl_qspi_invalid(struct fsl_qspi *q)
-{
- u32 reg;
-
- reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
- reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
- qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
-
- /*
- * The minimum delay : 1 AHB + 2 SFCK clocks.
- * Delay 1 us is enough.
- */
- udelay(1);
-
- reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
- qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
-}
-
-static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
- u8 opcode, unsigned int to, u32 *txbuf,
- unsigned count)
-{
- int ret, i, j;
- u32 tmp;
-
- dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
- q->chip_base_addr, to, count);
-
- /* clear the TX FIFO. */
- tmp = qspi_readl(q, q->iobase + QUADSPI_MCR);
- qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
-
- /* fill the TX data to the FIFO */
- for (j = 0, i = ((count + 3) / 4); j < i; j++) {
- tmp = fsl_qspi_endian_xchg(q, *txbuf);
- qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
- txbuf++;
- }
-
-	/* fill the TX FIFO up to 16 bytes for i.MX7d */
- if (needs_fill_txfifo(q))
- for (; i < 4; i++)
- qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
-
- /* Trigger it */
- ret = fsl_qspi_runcmd(q, opcode, to, count);
-
- if (ret == 0)
- return count;
-
- return ret;
-}
-
-static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
-{
- int nor_size = q->nor_size;
- void __iomem *base = q->iobase;
-
- qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
- qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
- qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
- qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
-}
-
-/*
- * There are two different ways to read out the data from the flash:
- * the "IP Command Read" and the "AHB Command Read".
- *
- * The IC guy suggests we use the "AHB Command Read", which is faster
- * than the "IP Command Read". (What's more, there is a bug in the
- * "IP Command Read" on the Vybrid.)
- *
- * After we set up the registers for the "AHB Command Read", we can use
- * the memcpy to read the data directly. A "missed" access to the buffer
- * causes the controller to clear the buffer, and use the sequence pointed
- * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
- */
-static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
-{
- void __iomem *base = q->iobase;
- int seqid;
-
-	/* AHB configuration for access buffers 0/1/2. */
- qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
- qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
- qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
- /*
- * Set ADATSZ with the maximum AHB buffer size to improve the
- * read performance.
- */
- qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
- ((q->devtype_data->ahb_buf_size / 8)
- << QUADSPI_BUF3CR_ADATSZ_SHIFT),
- base + QUADSPI_BUF3CR);
-
- /* We only use the buffer3 */
- qspi_writel(q, 0, base + QUADSPI_BUF0IND);
- qspi_writel(q, 0, base + QUADSPI_BUF1IND);
- qspi_writel(q, 0, base + QUADSPI_BUF2IND);
-
- /* Set the default lut sequence for AHB Read. */
- seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
- if (seqid < 0)
- return seqid;
-
- qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
- q->iobase + QUADSPI_BFGENCR);
-
- return 0;
-}
-
-/* This function is used to prepare and enable the QSPI clock */
-static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
-{
- int ret;
-
- ret = clk_prepare_enable(q->clk_en);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(q->clk);
- if (ret) {
- clk_disable_unprepare(q->clk_en);
- return ret;
- }
-
- if (needs_wakeup_wait_mode(q))
- pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
-
- return 0;
-}
-
-/* This function is used to disable and unprepare the QSPI clock */
-static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
-{
- if (needs_wakeup_wait_mode(q))
- pm_qos_remove_request(&q->pm_qos_req);
-
- clk_disable_unprepare(q->clk);
- clk_disable_unprepare(q->clk_en);
-
-}
-
-/* We use this function to do some basic init for spi_nor_scan(). */
-static int fsl_qspi_nor_setup(struct fsl_qspi *q)
-{
- void __iomem *base = q->iobase;
- u32 reg;
- int ret;
-
- /* disable and unprepare clock to avoid glitch pass to controller */
- fsl_qspi_clk_disable_unprep(q);
-
- /* the default frequency, we will change it in the future. */
- ret = clk_set_rate(q->clk, 66000000);
- if (ret)
- return ret;
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- return ret;
-
- /* Reset the module */
- qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
- base + QUADSPI_MCR);
- udelay(1);
-
- /* Init the LUT table. */
- fsl_qspi_init_lut(q);
-
- /* Disable the module */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
- base + QUADSPI_MCR);
-
- reg = qspi_readl(q, base + QUADSPI_SMPR);
- qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
- | QUADSPI_SMPR_FSPHS_MASK
- | QUADSPI_SMPR_HSENA_MASK
- | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
-
- /* Enable the module */
- qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
- base + QUADSPI_MCR);
-
- /* clear all interrupt status */
- qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
-
- /* enable the interrupt */
- qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
-
- return 0;
-}
-
-static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
-{
- unsigned long rate = q->clk_rate;
- int ret;
-
- if (needs_4x_clock(q))
- rate *= 4;
-
- /* disable and unprepare clock to avoid glitch pass to controller */
- fsl_qspi_clk_disable_unprep(q);
-
- ret = clk_set_rate(q->clk, rate);
- if (ret)
- return ret;
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- return ret;
-
- /* Init the LUT table again. */
- fsl_qspi_init_lut(q);
-
- /* Init for AHB read */
- return fsl_qspi_init_ahb_read(q);
-}
-
-static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
- { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
- { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
-
-static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor)
-{
- q->chip_base_addr = q->nor_size * (nor - q->nor);
-}
-
-static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
- int ret;
- struct fsl_qspi *q = nor->priv;
-
- ret = fsl_qspi_runcmd(q, opcode, 0, len);
- if (ret)
- return ret;
-
- fsl_qspi_read_data(q, len, buf);
- return 0;
-}
-
-static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
-{
- struct fsl_qspi *q = nor->priv;
- int ret;
-
- if (!buf) {
- ret = fsl_qspi_runcmd(q, opcode, 0, 1);
- if (ret)
- return ret;
-
- if (opcode == SPINOR_OP_CHIP_ERASE)
- fsl_qspi_invalid(q);
-
- } else if (len > 0) {
- ret = fsl_qspi_nor_write(q, nor, opcode, 0,
- (u32 *)buf, len);
- if (ret > 0)
- return 0;
- } else {
- dev_err(q->dev, "invalid cmd %d\n", opcode);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *buf)
-{
- struct fsl_qspi *q = nor->priv;
- ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
- (u32 *)buf, len);
-
-	/* invalidate the data in the AHB buffer. */
- fsl_qspi_invalid(q);
- return ret;
-}
-
-static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
- size_t len, u_char *buf)
-{
- struct fsl_qspi *q = nor->priv;
- u8 cmd = nor->read_opcode;
-
-	/* if necessary, ioremap the buffer before the AHB read */
- if (!q->ahb_addr) {
- q->memmap_offs = q->chip_base_addr + from;
- q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
-
- q->ahb_addr = ioremap_nocache(
- q->memmap_phy + q->memmap_offs,
- q->memmap_len);
- if (!q->ahb_addr) {
- dev_err(q->dev, "ioremap failed\n");
- return -ENOMEM;
- }
- /* ioremap if the data requested is out of range */
- } else if (q->chip_base_addr + from < q->memmap_offs
- || q->chip_base_addr + from + len >
- q->memmap_offs + q->memmap_len) {
- iounmap(q->ahb_addr);
-
- q->memmap_offs = q->chip_base_addr + from;
- q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
- q->ahb_addr = ioremap_nocache(
- q->memmap_phy + q->memmap_offs,
- q->memmap_len);
- if (!q->ahb_addr) {
- dev_err(q->dev, "ioremap failed\n");
- return -ENOMEM;
- }
- }
-
- dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n",
- cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
- len);
-
- /* Read out the data directly from the AHB buffer.*/
- memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
- len);
-
- return len;
-}
-
-static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
-{
- struct fsl_qspi *q = nor->priv;
- int ret;
-
- dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
- nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs);
-
- ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0);
- if (ret)
- return ret;
-
- fsl_qspi_invalid(q);
- return 0;
-}
-
-static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
- struct fsl_qspi *q = nor->priv;
- int ret;
-
- mutex_lock(&q->lock);
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- goto err_mutex;
-
- fsl_qspi_set_base_addr(q, nor);
- return 0;
-
-err_mutex:
- mutex_unlock(&q->lock);
- return ret;
-}
-
-static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
-{
- struct fsl_qspi *q = nor->priv;
-
- fsl_qspi_clk_disable_unprep(q);
- mutex_unlock(&q->lock);
-}
-
-static int fsl_qspi_probe(struct platform_device *pdev)
-{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ_1_1_4 |
- SNOR_HWCAPS_PP,
- };
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct fsl_qspi *q;
- struct resource *res;
- struct spi_nor *nor;
- struct mtd_info *mtd;
- int ret, i = 0;
-
- q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
- q->nor_num = of_get_child_count(dev->of_node);
- if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
- return -ENODEV;
-
- q->dev = dev;
- q->devtype_data = of_device_get_match_data(dev);
- if (!q->devtype_data)
- return -ENODEV;
- platform_set_drvdata(pdev, q);
-
- /* find the resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
- q->iobase = devm_ioremap_resource(dev, res);
- if (IS_ERR(q->iobase))
- return PTR_ERR(q->iobase);
-
- q->big_endian = of_property_read_bool(np, "big-endian");
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "QuadSPI-memory");
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
- res->name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return -EBUSY;
- }
-
- q->memmap_phy = res->start;
-
- /* find the clocks */
- q->clk_en = devm_clk_get(dev, "qspi_en");
- if (IS_ERR(q->clk_en))
- return PTR_ERR(q->clk_en);
-
- q->clk = devm_clk_get(dev, "qspi");
- if (IS_ERR(q->clk))
- return PTR_ERR(q->clk);
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- goto clk_failed;
- }
-
- /* find the irq */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "failed to get the irq: %d\n", ret);
- goto irq_failed;
- }
-
- ret = devm_request_irq(dev, ret,
- fsl_qspi_irq_handler, 0, pdev->name, q);
- if (ret) {
- dev_err(dev, "failed to request irq: %d\n", ret);
- goto irq_failed;
- }
-
- ret = fsl_qspi_nor_setup(q);
- if (ret)
- goto irq_failed;
-
- if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
- q->has_second_chip = true;
-
- mutex_init(&q->lock);
-
- /* iterate the subnodes. */
- for_each_available_child_of_node(dev->of_node, np) {
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
-
- nor = &q->nor[i];
- mtd = &nor->mtd;
-
- nor->dev = dev;
- spi_nor_set_flash_node(nor, np);
- nor->priv = q;
-
- if (q->nor_num > 1 && !mtd->name) {
- int spiflash_idx;
-
- ret = of_property_read_u32(np, "reg", &spiflash_idx);
- if (!ret) {
- mtd->name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d",
- dev_name(dev),
- spiflash_idx);
- if (!mtd->name) {
- ret = -ENOMEM;
- goto mutex_failed;
- }
- } else {
- dev_warn(dev, "reg property is missing\n");
- }
- }
-
- /* fill the hooks */
- nor->read_reg = fsl_qspi_read_reg;
- nor->write_reg = fsl_qspi_write_reg;
- nor->read = fsl_qspi_read;
- nor->write = fsl_qspi_write;
- nor->erase = fsl_qspi_erase;
-
- nor->prepare = fsl_qspi_prep;
- nor->unprepare = fsl_qspi_unprep;
-
- ret = of_property_read_u32(np, "spi-max-frequency",
- &q->clk_rate);
- if (ret < 0)
- goto mutex_failed;
-
- /* set the chip address for READID */
- fsl_qspi_set_base_addr(q, nor);
-
- ret = spi_nor_scan(nor, NULL, &hwcaps);
- if (ret)
- goto mutex_failed;
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret)
- goto mutex_failed;
-
- /* Set the correct NOR size now. */
- if (q->nor_size == 0) {
- q->nor_size = mtd->size;
-
-			/* Map the SPI NOR to an accessible address */
- fsl_qspi_set_map_addr(q);
- }
-
- /*
-		 * The TX FIFO is 64 bytes in the Vybrid, but a Page Program
-		 * may write up to 256 bytes at a time. The write works in
-		 * units of the TX FIFO, not in units of the SPI NOR's page
-		 * size.
-		 *
-		 * So shrink spi_nor->page_size if it is larger than the
-		 * TX FIFO.
- */
- if (nor->page_size > q->devtype_data->txfifo)
- nor->page_size = q->devtype_data->txfifo;
-
- i++;
- }
-
- /* finish the rest init. */
- ret = fsl_qspi_nor_setup_last(q);
- if (ret)
- goto last_init_failed;
-
- fsl_qspi_clk_disable_unprep(q);
- return 0;
-
-last_init_failed:
- for (i = 0; i < q->nor_num; i++) {
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
- mtd_device_unregister(&q->nor[i].mtd);
- }
-mutex_failed:
- mutex_destroy(&q->lock);
-irq_failed:
- fsl_qspi_clk_disable_unprep(q);
-clk_failed:
- dev_err(dev, "Freescale QuadSPI probe failed\n");
- return ret;
-}
-
-static int fsl_qspi_remove(struct platform_device *pdev)
-{
- struct fsl_qspi *q = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < q->nor_num; i++) {
- /* skip the holes */
- if (!q->has_second_chip)
- i *= 2;
- mtd_device_unregister(&q->nor[i].mtd);
- }
-
- /* disable the hardware */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
- qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
-
- mutex_destroy(&q->lock);
-
- if (q->ahb_addr)
- iounmap(q->ahb_addr);
-
- return 0;
-}
-
-static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state)
-{
- return 0;
-}
-
-static int fsl_qspi_resume(struct platform_device *pdev)
-{
- int ret;
- struct fsl_qspi *q = platform_get_drvdata(pdev);
-
- ret = fsl_qspi_clk_prep_enable(q);
- if (ret)
- return ret;
-
- fsl_qspi_nor_setup(q);
- fsl_qspi_set_map_addr(q);
- fsl_qspi_nor_setup_last(q);
-
- fsl_qspi_clk_disable_unprep(q);
-
- return 0;
-}
-
-static struct platform_driver fsl_qspi_driver = {
- .driver = {
- .name = "fsl-quadspi",
- .of_match_table = fsl_qspi_dt_ids,
- },
- .probe = fsl_qspi_probe,
- .remove = fsl_qspi_remove,
- .suspend = fsl_qspi_suspend,
- .resume = fsl_qspi_resume,
-};
-module_platform_driver(fsl_qspi_driver);
-
-MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
-MODULE_AUTHOR("Freescale Semiconductor Inc.");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 5442993b71ff..d9eed6844ba1 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -431,7 +431,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ_FAST |
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
SNOR_HWCAPS_READ_1_1_2 |
SNOR_HWCAPS_PP,
};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 6e13bbd1aaa5..fae147452aff 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -68,7 +68,7 @@ enum spi_nor_read_command_index {
SNOR_CMD_READ_4_4_4,
SNOR_CMD_READ_1_4_4_DTR,
- /* Octo SPI */
+ /* Octal SPI */
SNOR_CMD_READ_1_1_8,
SNOR_CMD_READ_1_8_8,
SNOR_CMD_READ_8_8_8,
@@ -85,7 +85,7 @@ enum spi_nor_pp_command_index {
SNOR_CMD_PP_1_4_4,
SNOR_CMD_PP_4_4_4,
- /* Octo SPI */
+ /* Octal SPI */
SNOR_CMD_PP_1_1_8,
SNOR_CMD_PP_1_8_8,
SNOR_CMD_PP_8_8_8,
@@ -278,6 +278,7 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -398,6 +399,8 @@ static u8 spi_nor_convert_3to4_read(u8 opcode)
{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
{ SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
{ SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
@@ -414,6 +417,8 @@ static u8 spi_nor_convert_3to4_program(u8 opcode)
{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
{ SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
{ SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
};
return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
@@ -1740,7 +1745,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
+ { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
@@ -1836,6 +1845,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
@@ -1847,6 +1858,8 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &mx25l25635_fixups },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
@@ -1872,7 +1885,8 @@ static const struct flash_info spi_nor_ids[] = {
/* Micron */
{
"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES)
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_4B_OPCODES)
},
/* PMC */
@@ -1885,13 +1899,17 @@ static const struct flash_info spi_nor_ids[] = {
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
@@ -3591,6 +3609,13 @@ static int spi_nor_init_params(struct spi_nor *nor,
SNOR_PROTO_1_1_4);
}
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 485462d3087f..537c90c8eb0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- /* Link-local multicast packets should be passed to the
- * stack on the link they arrive as well as pass them to the
- * bond-master device. These packets are mostly usable when
- * stack receives it with the link on which they arrive
- * (e.g. LLDP) they also must be available on master. Some of
- * the use cases include (but are not limited to): LLDP agents
- * that must be able to operate both on enslaved interfaces as
- * well as on bonds themselves; linux bridges that must be able
- * to process/pass BPDUs from attached bonds when any kind of
- * STP version is enabled on the network.
+ /*
+	 * For packets that the bond_should_deliver_exact_match() call determines
+	 * should be suppressed, we want to make an exception for link-local packets.
+ * This is necessary for e.g. LLDP daemons to be able to monitor
+ * inactive slave links without being forced to bind to them
+ * explicitly.
+ *
+ * At the same time, packets that are passed to the bonding master
+ * (including link-local ones) can have their originating interface
+ * determined via PACKET_ORIGDEV socket option.
*/
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (nskb) {
- nskb->dev = bond->dev;
- nskb->queue_mapping = 0;
- netif_rx(nskb);
- }
- return RX_HANDLER_PASS;
- }
- if (bond_should_deliver_exact_match(skb, slave, bond))
+ if (bond_should_deliver_exact_match(skb, slave, bond)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ return RX_HANDLER_PASS;
return RX_HANDLER_EXACT;
+ }
skb->dev = bond->dev;
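A userspace sketch (illustrative only, not part of this patch) of the PACKET_ORIGDEV behaviour referenced in the new comment: with the option set, recvfrom() on a packet socket reports the slave interface a frame actually arrived on via sockaddr_ll.sll_ifindex, even though the frame is delivered through the bond. Requires CAP_NET_RAW.

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int one = 1;
	char buf[2048];
	struct sockaddr_ll from;
	socklen_t fromlen = sizeof(from);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;

	/* report the originating (slave) device, not just the bond master */
	setsockopt(fd, SOL_PACKET, PACKET_ORIGDEV, &one, sizeof(one));

	if (recvfrom(fd, buf, sizeof(buf), 0,
		     (struct sockaddr *)&from, &fromlen) > 0)
		printf("frame arrived on ifindex %d\n", from.sll_ifindex);

	return 0;
}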
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0e4bbdcc614f..c76892ac4e69 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
}
-static void b53_enable_vlan(struct b53_device *dev, bool enable)
+static void b53_enable_vlan(struct b53_device *dev, bool enable,
+ bool enable_filtering)
{
u8 mgmt, vc0, vc1, vc4 = 0, vc5;
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
- vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
- vc5 |= VC5_DROP_VTABLE_MISS;
+ if (enable_filtering) {
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+ } else {
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+ }
if (is5325(dev))
vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
}
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+ dev->vlan_enabled = enable;
+ dev->vlan_filtering_enabled = enable_filtering;
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+ if (is5325(dev) || is5365(dev))
+ return 1;
+ else
+ return 0;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
- int i;
+ int i, def_vid;
+
+ def_vid = b53_default_pvid(dev);
/* clear all vlan entries */
if (is5325(dev) || is5365(dev)) {
- for (i = 1; i < dev->num_vlans; i++)
+ for (i = def_vid; i < dev->num_vlans; i++)
b53_set_vlan_entry(dev, i, &vl);
} else {
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false);
+ b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), 1);
+ B53_VLAN_PORT_DEF_TAG(i), def_vid);
if (!is5325(dev) && !is5365(dev))
b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
+ struct b53_device *dev = ds->priv;
+ struct net_device *bridge_dev;
+ unsigned int i;
+ u16 pvid, new_pvid;
+
+	/* Handle the case where multiple bridges span the same switch device
+ * and one of them has a different setting than what is being requested
+ * which would be breaking filtering semantics for any of the other
+ * bridge devices.
+ */
+ b53_for_each_port(dev, i) {
+ bridge_dev = dsa_to_port(ds, i)->bridge_dev;
+ if (bridge_dev &&
+ bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
+ br_vlan_enabled(bridge_dev) != vlan_filtering) {
+ netdev_err(bridge_dev,
+ "VLAN filtering is global to the switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ new_pvid = pvid;
+ if (dev->vlan_filtering_enabled && !vlan_filtering) {
+ /* Filtering is currently enabled, use the default PVID since
+ * the bridge does not expect tagging anymore
+ */
+ dev->ports[port].pvid = pvid;
+ new_pvid = b53_default_pvid(dev);
+ } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+ /* Filtering is currently disabled, restore the previous PVID */
+ new_pvid = dev->ports[port].pvid;
+ }
+
+ if (pvid != new_pvid)
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ new_pvid);
+
+ b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
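As a concrete example of the save/restore above: a port whose bridge had set PVID 42 with vlan_filtering on falls back to the default PVID (1 on BCM5325/5365, 0 on other devices, per b53_default_pvid()) when filtering is turned off, and gets 42 back from ports[port].pvid when filtering is re-enabled.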
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, true);
+ b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
return 0;
}
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_fast_age_vlan(dev, vid);
}
- if (pvid) {
+ if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid_end);
b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
vl->members &= ~BIT(port);
- if (pvid == vid) {
- if (is5325(dev) || is5365(dev))
- pvid = 1;
- else
- pvid = 0;
- }
+ if (pvid == vid)
+ pvid = b53_default_pvid(dev);
if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
- if (is5325(dev) || is5365(dev))
- pvid = 1;
- else
- pvid = 0;
+ pvid = b53_default_pvid(dev);
/* Make this port join all VLANs without VLAN entries */
if (is58xx(dev)) {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index ec796482792d..4dc7ee38b258 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,6 +91,7 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
struct ethtool_eee eee;
+ u16 pvid;
};
struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
+ bool vlan_enabled;
+ bool vlan_filtering_enabled;
unsigned int num_ports;
struct b53_port *ports;
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 17ec32b0a1cc..14138d423cf1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_wolinfo pwol;
+ struct ethtool_wolinfo pwol = { };
/* Get the parent device WoL settings */
- p->ethtool_ops->get_wol(p, &pwol);
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
/* Advertise the parent device supported settings */
wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
- struct ethtool_wolinfo pwol;
+ struct ethtool_wolinfo pwol = { };
- p->ethtool_ops->get_wol(p, &pwol);
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
if (wol->wolopts & ~pwol.supported)
return -EINVAL;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 693a67f45bef..ddc1f9ca8ebc 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1162,6 +1162,12 @@ static struct platform_driver gswip_driver = {
module_platform_driver(gswip_driver);
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 12fd7ce3f1ff..7e3c00bd9532 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -896,7 +896,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
default:
return U64_MAX;
}
- value = (((u64)high) << 16) | low;
+ value = (((u64)high) << 32) | low;
return value;
}
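A quick check of the corrected shift above: a 64-bit MIB counter is read as two 32-bit halves, so high = 0x00000001 and low = 0x00000002 must combine to 0x0000000100000002, which requires shifting the high word by 32 bits; the previous shift of 16 would have produced 0x00010002 and lost the upper half.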
@@ -3093,7 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
- .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -4595,6 +4595,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
return 0;
}
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4631,6 +4639,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
if (err)
goto free;
+ mv88e6xxx_ports_cmode_init(chip);
+
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_switch_reset(chip);
mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ebd26b6a93e6..79ab51e69aee 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -398,6 +398,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
+ /* cmode doesn't change, nothing to do for us */
+ if (cmode == chip->ports[port].cmode)
+ return 0;
+
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane < 0)
return lane;
@@ -408,7 +412,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return err;
}
- err = mv88e6390_serdes_power(chip, port, false);
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
@@ -424,7 +428,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- err = mv88e6390_serdes_power(chip, port, true);
+ err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index e583641de758..4aadf321edb7 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,6 +52,7 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b58ca7cb8e9d..fbba300c1d01 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -275,6 +275,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
+ /* Tx TC/Queue number config */
+ hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 939f77e2e117..8ac7a67b15c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1274,6 +1274,15 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT,
+ tx_traf_class_mode);
+}
+
void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 03c570d115fe..f529540bfd7e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -605,6 +605,10 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
+/* set TX Traffic Class Mode */
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode);
+
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 8470d92db812..e91ffce005f1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1948,6 +1948,19 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..31ff1e0d1baa 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
- static int cards_found;
+ static int cards_found = 0;
unsigned long mmio_start;
int mmio_len;
int err;
- cards_found = 0;
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 28c9b0bdf2f6..bc3ac369cbe3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
reg = rxchk_readl(priv, RXCHK_CONTROL);
+ /* Clear L2 header checks, which would prevent BPDUs
+ * from being received.
+ */
+ reg &= ~RXCHK_L2_HDR_DIS;
if (priv->rx_chk_en)
reg |= RXCHK_EN;
else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bc7e495b027..803f7990d32b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -500,6 +500,12 @@ normal_tx:
}
length >>= 9;
+ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+ dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+ skb->len);
+ i = 0;
+ goto tx_dma_error;
+ }
flags |= bnxt_lhint_arr[length];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
@@ -3903,7 +3909,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (len)
break;
/* on first few passes, just barely sleep */
- if (i < DFLT_HWRM_CMD_TIMEOUT)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
else
@@ -3926,7 +3932,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
dma_rmb();
if (*valid)
break;
- udelay(1);
+ usleep_range(1, 5);
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..2fb653e0048d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -582,7 +582,7 @@ struct nqe_cn {
(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
-#define HWRM_VALID_BIT_DELAY_USEC 20
+#define HWRM_VALID_BIT_DELAY_USEC 150
#define BNXT_HWRM_CHNL_CHIMP 0
#define BNXT_HWRM_CHNL_KONG 1
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
};
struct nicvf_work {
- struct delayed_work work;
+ struct work_struct work;
u8 mode;
struct xcast_addr_list *mc;
};
@@ -327,7 +327,11 @@ struct nicvf {
struct nicvf_work rx_mode_work;
/* spinlock to protect workqueue arguments from concurrent access */
spinlock_t rx_mode_wq_lock;
-
+ /* workqueue for handling kernel ndo_set_rx_mode() calls */
+ struct workqueue_struct *nicvf_rx_mode_wq;
+ /* mutex to protect VF's mailbox contents from concurrent access */
+ struct mutex rx_mode_mtx;
+ struct delayed_work link_change_work;
/* PTP timestamp */
struct cavium_ptp *ptp_clock;
/* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
struct xcast {
u8 msg;
- union {
- u8 mode;
- u64 mac;
- } data;
+ u8 mode;
+ u64 mac:48;
};
/* 128 bit shared memory between PF and each VF */
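
The struct changes above swap a delayed_work for a plain work_struct and add a per-VF workqueue plus a delayed work item for link-change handling. The kernel-style sketch below contrasts how the two kinds of work item are declared, initialized and queued; it is not thunderx code, and every identifier is invented.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_vf {
	struct workqueue_struct *wq;		/* e.g. from alloc_ordered_workqueue() */
	struct work_struct rx_mode_work;	/* runs as soon as a worker is free */
	struct delayed_work link_poll;		/* runs after a timer expires */
};

static void demo_rx_mode_fn(struct work_struct *work)
{
	struct demo_vf *vf = container_of(work, struct demo_vf, rx_mode_work);

	/* ... push the new RX mode to the PF over the mailbox ... */
	(void)vf;
}

static void demo_link_poll_fn(struct work_struct *work)
{
	struct demo_vf *vf = container_of(work, struct demo_vf, link_poll.work);

	/* ... ask the PF for the current link state ... then re-arm itself */
	queue_delayed_work(vf->wq, &vf->link_poll, 2 * HZ);
}

static void demo_start(struct demo_vf *vf)
{
	INIT_WORK(&vf->rx_mode_work, demo_rx_mode_fn);
	INIT_DELAYED_WORK(&vf->link_poll, demo_link_poll_fn);

	queue_work(vf->wq, &vf->rx_mode_work);		/* immediate */
	queue_delayed_work(vf->wq, &vf->link_poll, 0);	/* first run right away */
}
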
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
u8 *vf_lmac_map;
- struct delayed_work dwork;
- struct workqueue_struct *check_link;
- u8 *link;
- u8 *duplex;
- u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
u8 num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
}
+/* Get BGX LMAC link status and update corresponding VF
+ * if there is a change, valid only if internal L2 switch
+ * is not present otherwise VF link is always treated as up
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+ union nic_mbx mbx = {};
+ struct bgx_link_status link;
+ u8 bgx, lmac;
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ mbx.link_status.mac_type = link.mac_type;
+
+ /* reply with link status */
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
int i;
int ret = 0;
- nic->mbx_lock[vf] = true;
-
mbx_addr = nic_get_mbx_addr(vf);
mbx_data = (u64 *)&mbx;
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- if (vf < nic->num_vf_en) {
- nic->link[vf] = 0;
- nic->duplex[vf] = 0;
- nic->speed[vf] = 0;
- }
- goto unlock;
+ return;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_RSS_SIZE:
nic_send_rss_size(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_RSS_CFG:
case NIC_MBOX_MSG_RSS_CFG_CONT:
nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic_enable_vf(nic, vf, true);
- goto unlock;
+ break;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_NICVF_PTR:
nic->nicvf[vf] = mbx.nicvf.nicvf;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
nic_send_pnicvf(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_SNICVF_PTR:
nic_send_snicvf(nic, &mbx.nicvf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats);
- goto unlock;
+ return;
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_PTP_CFG:
nic_config_timestamp(nic, vf, &mbx.ptp);
break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
- mbx.xcast.data.mac,
+ mbx.xcast.mac,
vf < NIC_VF_PER_MBX_REG ? vf :
vf - NIC_VF_PER_MBX_REG);
break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
}
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+ bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ nic_link_status_get(nic, vf);
+ return;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
}
-unlock:
- nic->mbx_lock[vf] = false;
}
static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
return 0;
}
-/* Poll for BGX LMAC link status and update corresponding VF
- * if there is a change, valid only if internal L2 switch
- * is not present otherwise VF link is always treated as up
- */
-static void nic_poll_for_link(struct work_struct *work)
-{
- union nic_mbx mbx = {};
- struct nicpf *nic;
- struct bgx_link_status link;
- u8 vf, bgx, lmac;
-
- nic = container_of(work, struct nicpf, dwork.work);
-
- mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
-
- for (vf = 0; vf < nic->num_vf_en; vf++) {
- /* Poll only if VF is UP */
- if (!nic->vf_enabled[vf])
- continue;
-
- /* Get BGX, LMAC indices for the VF */
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- /* Get interface link status */
- bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
-
- /* Inform VF only if link status changed */
- if (nic->link[vf] == link.link_up)
- continue;
-
- if (!nic->mbx_lock[vf]) {
- nic->link[vf] = link.link_up;
- nic->duplex[vf] = link.duplex;
- nic->speed[vf] = link.speed;
-
- /* Send a mbox message to VF with current link status */
- mbx.link_status.link_up = link.link_up;
- mbx.link_status.duplex = link.duplex;
- mbx.link_status.speed = link.speed;
- mbx.link_status.mac_type = link.mac_type;
- nic_send_msg_to_vf(nic, vf, &mbx);
- }
- }
- queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
-}
-
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!nic->vf_lmac_map)
goto err_release_regions;
- nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->link)
- goto err_release_regions;
-
- nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->duplex)
- goto err_release_regions;
-
- nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
- if (!nic->speed)
- goto err_release_regions;
-
/* Initialize hardware */
nic_init_hw(nic);
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_unregister_interrupts;
- /* Register a physical link status poll fn() */
- nic->check_link = alloc_workqueue("check_link_status",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!nic->check_link) {
- err = -ENOMEM;
- goto err_disable_sriov;
- }
-
- INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
- queue_delayed_work(nic->check_link, &nic->dwork, 0);
-
return 0;
-err_disable_sriov:
- if (nic->flags & NIC_SRIOV_ENABLED)
- pci_disable_sriov(pdev);
err_unregister_interrupts:
nic_unregister_interrupts(nic);
err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
if (nic->flags & NIC_SRIOV_ENABLED)
pci_disable_sriov(pdev);
- if (nic->check_link) {
- /* Destroy work Queue */
- cancel_delayed_work_sync(&nic->dwork);
- destroy_workqueue(nic->check_link);
- }
-
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
int timeout = NIC_MBOX_MSG_TIMEOUT;
int sleep = 10;
+ int ret = 0;
+
+ mutex_lock(&nic->rx_mode_mtx);
nic->pf_acked = false;
nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
netdev_err(nic->netdev,
"PF NACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
msleep(sleep);
if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
- return -EBUSY;
+ ret = -EBUSY;
+ break;
}
}
- return 0;
+ mutex_unlock(&nic->rx_mode_mtx);
+ return ret;
}
/* Checks if VF is able to comminicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
return 1;
}
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to CFG DONE msg\n");
+ }
+}
+
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true;
- nic->link_up = mbx.link_status.link_up;
- nic->duplex = mbx.link_status.duplex;
- nic->speed = mbx.link_status.speed;
- nic->mac_type = mbx.link_status.mac_type;
- if (nic->link_up) {
- netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
- nic->speed,
- nic->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- netif_carrier_on(nic->netdev);
- netif_tx_start_all_queues(nic->netdev);
- } else {
- netdev_info(nic->netdev, "Link is Down\n");
- netif_carrier_off(nic->netdev);
- netif_tx_stop_all_queues(nic->netdev);
+ if (nic->link_up != mbx.link_status.link_up) {
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ nic->mac_type = mbx.link_status.mac_type;
+ if (nic->link_up) {
+ netdev_info(nic->netdev,
+ "Link is Up %d Mbps %s duplex\n",
+ nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
+ netif_carrier_on(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "Link is Down\n");
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
}
break;
case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
+ cancel_delayed_work_sync(&nic->link_change_work);
+
+ /* wait till all queued set_rx_mode tasks complete */
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
return nicvf_send_msg_to_pf(nic, &mbx);
}
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+ struct nicvf *nic = container_of(work_arg,
+ struct nicvf,
+ link_change_work.work);
+ union nic_mbx mbx = {};
+ mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 2 * HZ);
+}
+
int nicvf_open(struct net_device *netdev)
{
int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
- union nic_mbx mbx = {};
+
+ /* wait till all queued set_rx_mode tasks complete, if any */
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
/* Send VF config done msg to PF */
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
- nicvf_write_to_mbx(nic, &mbx);
+ nicvf_send_cfg_done(nic);
+
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
return 0;
cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* flush DMAC filters and reset RX mode */
mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
- nicvf_send_msg_to_pf(nic, &mbx);
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
if (mode & BGX_XCAST_MCAST_FILTER) {
/* once enabling filtering, we need to signal to PF to add
* its' own LMAC to the filter to accept packets for it.
*/
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = 0;
- nicvf_send_msg_to_pf(nic, &mbx);
+ mbx.xcast.mac = 0;
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
/* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* now go through kernel list of MACs and add them one by one */
for (idx = 0; idx < mc_addrs->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = mc_addrs->mc[idx];
- nicvf_send_msg_to_pf(nic, &mbx);
+ mbx.xcast.mac = mc_addrs->mc[idx];
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
- kfree(mc_addrs);
}
/* and finally set rx mode for PF accordingly */
mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
- mbx.xcast.data.mode = mode;
+ mbx.xcast.mode = mode;
nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+ kfree(mc_addrs);
}
static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
{
struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
- work.work);
+ work);
struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
u8 mode;
struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
kfree(nic->rx_mode_work.mc);
nic->rx_mode_work.mc = mc_list;
nic->rx_mode_work.mode = mode;
- queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+ queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
spin_unlock(&nic->rx_mode_wq_lock);
}
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&nic->reset_task, nicvf_reset_task);
- INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+ nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+ WQ_MEM_RECLAIM,
+ nic->vf_id);
+ INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
spin_lock_init(&nic->rx_mode_wq_lock);
+ mutex_init(&nic->rx_mode_mtx);
err = register_netdev(netdev);
if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
nic = netdev_priv(netdev);
pnetdev = nic->pnicvf->netdev;
- cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
/* Check if this Qset is assigned to different VF.
* If yes, clean primary and all secondary Qsets.
*/
if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
unregister_netdev(pnetdev);
+ if (nic->nicvf_rx_mode_wq) {
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
+ nic->nicvf_rx_mode_wq = NULL;
+ }
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
static int __init nicvf_init_module(void)
{
pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
- nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
- WQ_MEM_RECLAIM);
return pci_register_driver(&nicvf_driver);
}
static void __exit nicvf_cleanup_module(void)
{
- if (nicvf_rx_mode_wq) {
- destroy_workqueue(nicvf_rx_mode_wq);
- nicvf_rx_mode_wq = NULL;
- }
pci_unregister_driver(&nicvf_driver);
}
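
The VF changes above serialize all mailbox traffic with a mutex and then poll for an ACK/NACK flag that the interrupt handler sets. A minimal kernel-style sketch of that request/acknowledge pattern is shown below; it is not thunderx code, and the timeout values and names are invented.

#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_mbx {
	struct mutex lock;	/* one outstanding request at a time (mutex_init() elsewhere) */
	bool acked;		/* set by the mailbox interrupt handler */
	bool nacked;		/* set by the mailbox interrupt handler */
};

static void demo_mbx_write(struct demo_mbx *mbx, const void *msg)
{
	/* ... copy the message into the shared mailbox registers ... */
}

static int demo_mbx_send(struct demo_mbx *mbx, const void *msg)
{
	int timeout_ms = 2000, step_ms = 10;
	int ret = 0;

	mutex_lock(&mbx->lock);
	mbx->acked = false;
	mbx->nacked = false;
	demo_mbx_write(mbx, msg);

	while (!mbx->acked) {
		if (mbx->nacked) {
			ret = -EINVAL;		/* peer rejected the request */
			break;
		}
		if (timeout_ms <= 0) {
			ret = -EBUSY;		/* no reply at all */
			break;
		}
		msleep(step_ms);
		timeout_ms -= step_ms;
	}

	mutex_unlock(&mbx->lock);
	return ret;
}
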
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
/* Disable MAC steering (NCSI traffic) */
for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
- bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
}
static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
#define RX_DMAC_COUNT 32
-#define BGX_CMR_RX_STREERING 0x300
+#define BGX_CMR_RX_STEERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index c041f44324db..b3654598a2d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+ lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
lld->filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
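
The new sge_host_page_size field is derived as 1 << (hps + 10), i.e. the hps value appears to encode log2 of the host page size in KiB. The small standalone example below just tabulates that decode; the interpretation is an assumption based only on the expression above, not on cxgb4 documentation.

#include <stdio.h>

int main(void)
{
	/* Assumed encoding: page size = 1 << (hps + 10) bytes. */
	for (unsigned int hps = 0; hps <= 4; hps++)
		printf("hps=%u -> %u bytes\n", hps, 1u << (hps + 10));
	/* e.g. hps=2 -> 4096 bytes, i.e. a standard 4 KiB host page */
	return 0;
}
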
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 5fa9a2d5fc4b..21da34a4ca24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
unsigned int cclk_ps; /* Core clock period in psec */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned int sge_host_page_size; /* SGE host page size */
unsigned short filt_mode; /* filter optional components */
unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
/* scheduler queue */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index b8155f5e71b4..ac55db065f16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
}
+
+ put_device(&pdev->dev);
+
return 0;
}
EXPORT_SYMBOL(hns_dsaf_roce_reset);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f52e2c46e6a7..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
!i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
if (!ok) {
+ /* Log this in case the user has forgotten to give the kernel
+ * any buffers, even later in the application.
+ */
dev_info(&vsi->back->pdev->dev,
- "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
ring->xsk_umem ? "UMEM enabled " : "",
ring->queue_index, pf_q);
}
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
- if (i40e_enabled_xdp_vsi(vsi))
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit
+ * calls are completed.
+ */
+ synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
+ }
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
if (old_prog)
bpf_prog_put(old_prog);
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This is so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i]->xsk_umem)
+ (void)i40e_xsk_async_xmit(vsi->netdev, i);
+
return 0;
}
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
{
i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
- if (i40e_enabled_xdp_vsi(vsi))
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit calls are
+ * completed.
+ */
+ synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+ }
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
}
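
The synchronize_rcu() calls added above wait for in-flight ndo_xdp_xmit callers, which run inside RCU read-side critical sections taken by the networking core, before the XDP Tx rings are cleaned. The kernel-style sketch below makes both sides of that pattern explicit with an __rcu pointer; it is illustrative only and is not how i40e itself stores the ring.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_ring { int size; /* ... descriptors ... */ };

static struct demo_ring __rcu *demo_xdp_ring;

/* Reader side, e.g. an ndo_xdp_xmit-like fast path. */
static int demo_xmit(void)
{
	struct demo_ring *ring;
	int ret = -ENXIO;

	rcu_read_lock();
	ring = rcu_dereference(demo_xdp_ring);
	if (ring)
		ret = 0;	/* ... post descriptors to the ring ... */
	rcu_read_unlock();
	return ret;
}

/* Writer side, e.g. the down/clean path. */
static void demo_down(struct demo_ring *ring)
{
	RCU_INIT_POINTER(demo_xdp_ring, NULL);
	synchronize_rcu();	/* wait for every in-flight demo_xmit() reader */
	kfree(ring);		/* only now is it safe to free/clean the ring */
}
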
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_ring *xdp_ring;
int drops = 0;
int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
+
+ /* Kick start the NAPI context so that receiving will start */
+ err = i40e_xsk_async_xmit(vsi->netdev, qid);
+ if (err)
+ return err;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
- /* Enable L3/L4 for Tx Switched packets */
- mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+ /* Enable L3/L4 for Tx Switched packets only for X550,
+ * older devices do not support this feature
+ */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct bpf_prog *old_prog;
+ bool need_reset;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -ENOMEM;
old_prog = xchg(&adapter->xdp_prog, prog);
+ need_reset = (!!prog != !!old_prog);
/* If transitioning XDP modes reconfigure rings */
- if (!!prog != !!old_prog) {
+ if (need_reset) {
int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This is so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->xdp_ring[i]->xsk_umem)
+ (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
ixgbe_txrx_ring_disable(adapter, qid);
err = ixgbe_add_xsk_umem(adapter, umem, qid);
+ if (err)
+ return err;
- if (if_running)
+ if (if_running) {
ixgbe_txrx_ring_enable(adapter, qid);
- return err;
+ /* Kick start the NAPI context so that receiving will start */
+ err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+ !netif_carrier_ok(xdp_ring->netdev)) {
work_done = false;
break;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2f427271a793..292a668ce88e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- return ret;
+ goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
+
+err_put_clk:
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
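
The mv643xx_eth change above adds a classic goto-based unwind so that a failure after the clock has been prepared still releases it. Below is a minimal probe() sketch of that pattern, with an invented helper standing in for the step that fails; it is not mv643xx_eth code.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

static int demo_setup_children(struct platform_device *pdev)
{
	return -ENODEV;			/* pretend a later probe step failed */
}

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;	/* nothing to unwind yet */
	}

	ret = demo_setup_children(pdev);
	if (ret)
		goto err_put_clk;	/* must not leak the prepared clock */

	return 0;

err_put_clk:
	if (!IS_ERR(clk))
		clk_disable_unprepare(clk);
	return ret;
}
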
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..8433fb9c3eee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2146,7 +2146,7 @@ err_drop_frame:
if (unlikely(!skb))
goto err_drop_frame_ret_pool;
- dma_sync_single_range_for_cpu(dev->dev.parent,
+ dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
rx_desc->buf_phys_addr,
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f3a5fa84860f..57727fe1501e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 200;
+ pdev->d3_delay = 300;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6b88881b8e35..c1438ae52a11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 32519c93df17..b65e274b02e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
bool configure = false;
bool pfc = false;
+ u16 thres_cells;
+ u16 delay_cells;
bool lossy;
- u16 thres;
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
continue;
lossy = !(pfc || pause_en);
- thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
- pause_en);
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+ thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+ pfc, pause_en);
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+ thres_cells, lossy);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index f6ecfa778660..8f72587b5a2c 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1681,5 +1681,5 @@ MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
MODULE_LICENSE("GPL");
module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
+MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)");
MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 310807ef328b..4d1b4a24907f 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1400,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
}
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
- unsigned int frame_length)
+ unsigned int frame_length,
+ int nr_frags)
{
/* called only from within lan743x_tx_xmit_frame.
* assuming tx->ring_lock has already been acquired.
@@ -1410,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
/* wrap up previous descriptor */
tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = tx->frame_data0;
@@ -1514,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
u32 tx_tail_flags = 0;
/* wrap up previous descriptor */
- tx->frame_data0 |= TX_DESC_DATA0_LS_;
- tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1600,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
if (gso)
- lan743x_tx_frame_add_lso(tx, frame_length);
+ lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
if (nr_frags <= 0)
goto finish;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..0a868c829b90 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum alu_op alu_op, bool skip)
+ enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
- if (skip) {
- meta->skip = true;
- return 0;
- }
-
wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
}
static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
}
static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
}
static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
}
static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
}
static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index beb8e5d6401a..ded556b7bab5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
+ if (!ether_addr_equal(ethh->h_dest,
+ p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "Got unexpected mac %pM instead of %pM\n",
+ ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ return -EINVAL;
+ }
+
ether_addr_copy(remote_mac_addr, ethh->h_source);
ether_addr_copy(local_mac_addr, ethh->h_dest);
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
- u32 mpa_buff_size;
+ u32 buff_size;
u16 n_ooo_bufs;
int rc = 0;
int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
- data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+ data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
goto err;
}
+ buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
QED_IWARP_LL2_SYN_RX_SIZE,
- QED_IWARP_MAX_SYN_PKT_SIZE,
+ buff_size,
iwarp_info->ll2_syn_handle);
if (rc)
goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
- mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
data.input.rx_num_desc,
- mpa_buff_size,
+ buff_size,
iwarp_info->ll2_mpa_handle);
if (rc)
goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
- iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+ iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
if (!iwarp_info->mpa_intermediate_buf)
goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index b8f612d00241..7ac959038324 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
-#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define QED_IWARP_MAX_OOO (16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6f65fc..736e29635b77 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwmac4_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
u32 own, ctxt;
int ret = 1;
- own = p->des3 & RDES3_OWN;
- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ own = rdes3 & RDES3_OWN;
+ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
if (likely(!own && ctxt)) {
- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
/* Corrupted value */
ret = -EINVAL;
else
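
The dwmac4 fix above converts each little-endian descriptor word with le32_to_cpu() once before testing any bits, which is required for correctness on big-endian CPUs. A kernel-style sketch of the same idea with an invented descriptor layout (not stmmac code):

#include <linux/types.h>
#include <linux/bits.h>
#include <asm/byteorder.h>

struct demo_desc {
	__le32 des0;			/* little-endian as written by the DMA engine */
	__le32 des3;
};

#define DEMO_DES3_OWN	BIT(31)		/* invented bit position */

static bool demo_desc_owned_by_hw(const struct demo_desc *p)
{
	u32 rdes3 = le32_to_cpu(p->des3);	/* convert once, test in CPU order */

	return rdes3 & DEMO_DES3_OWN;
}
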
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5d85742a2be0..3c749c327cbd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
- priv->eee_enabled = edata->eee_enabled;
-
- if (!priv->eee_enabled)
+ if (!edata->eee_enabled) {
stmmac_disable_eee_mode(priv);
- else {
+ } else {
/* We are asking for enabling the EEE but it is safe
* to verify all by invoking the eee_init function.
* In case of failure it will return an error.
*/
- priv->eee_enabled = stmmac_eee_init(priv);
- if (!priv->eee_enabled)
+ edata->eee_enabled = stmmac_eee_init(priv);
+ if (!edata->eee_enabled)
return -EOPNOTSUPP;
-
- /* Do not change tx_lpi_timer in case of failure */
- priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(dev->phydev, edata);
+ ret = phy_ethtool_set_eee(dev->phydev, edata);
+ if (ret)
+ return ret;
+
+ priv->eee_enabled = edata->eee_enabled;
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ return 0;
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
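
The stmmac ethtool change above commits eee_enabled and tx_lpi_timer to the driver state only after the PHY layer accepts the new settings, so a failed call leaves the previous state intact. The standalone sketch below shows that validate-then-commit shape with made-up names; it is not stmmac code.

#include <stdio.h>
#include <errno.h>

struct demo_priv { int eee_enabled; int tx_lpi_timer; };

static int lower_layer_set_eee(int enabled, int timer)
{
	(void)enabled;
	return timer < 0 ? -EINVAL : 0;		/* stand-in for the PHY call */
}

static int demo_set_eee(struct demo_priv *priv, int enabled, int timer)
{
	int ret = lower_layer_set_eee(enabled, timer);

	if (ret)
		return ret;			/* priv->* untouched on failure */

	priv->eee_enabled = enabled;		/* commit only after success */
	priv->tx_lpi_timer = timer;
	return 0;
}

int main(void)
{
	struct demo_priv priv = { 0, 100 };

	demo_set_eee(&priv, 1, -5);		/* rejected: old values survive */
	demo_set_eee(&priv, 1, 250);		/* accepted: state updated */
	printf("enabled=%d timer=%d\n", priv.eee_enabled, priv.tx_lpi_timer);
	return 0;
}
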
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1f612268c998..d847f672a705 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
const char *name;
char node_name[32];
- if (of_property_read_string(node, "label", &name) < 0) {
+ if (of_property_read_string(child, "label", &name) < 0) {
snprintf(node_name, sizeof(node_name), "%pOFn", child);
name = node_name;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3377ac66a347..5583d993480d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -692,15 +692,20 @@ out:
static int geneve_open(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
bool metadata = geneve->collect_md;
+ bool ipv4, ipv6;
int ret = 0;
+ ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+ ipv4 = !ipv6 || metadata;
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6 || metadata)
+ if (ipv6) {
ret = geneve_sock_add(geneve, true);
+ if (ret < 0 && ret != -EAFNOSUPPORT)
+ ipv4 = false;
+ }
#endif
- if (!ret && (!ipv6 || metadata))
+ if (ipv4)
ret = geneve_sock_add(geneve, false);
if (ret < 0)
geneve_sock_release(geneve);
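
The geneve_open() rework above decides which address-family sockets to create: metadata tunnels want both, an IPv6-configured tunnel wants only IPv6, and an IPv6 failure other than -EAFNOSUPPORT suppresses the IPv4 attempt. The small standalone program below only tabulates the first part of that decision (which families are wanted); error handling is deliberately left out, and it is not geneve code.

#include <stdbool.h>
#include <stdio.h>

static void pick_families(bool cfg_ipv6, bool metadata)
{
	bool want_v6 = cfg_ipv6 || metadata;
	bool want_v4 = !want_v6 || metadata;

	printf("cfg_ipv6=%d metadata=%d -> open v6=%d v4=%d\n",
	       cfg_ipv6, metadata, want_v6, want_v4);
}

int main(void)
{
	pick_families(false, false);	/* plain IPv4 tunnel: v4 only */
	pick_families(true,  false);	/* plain IPv6 tunnel: v6 only */
	pick_families(false, true);	/* metadata tunnel: both families */
	pick_families(true,  true);	/* metadata tunnel: both families */
	return 0;
}
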
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 256adbd044f5..cf4897043e83 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph->check = 0;
+ iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct netvsc_channel *nvchan)
{
@@ -770,9 +778,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
/* skb is already created with CHECKSUM_NONE */
skb_checksum_none_assert(skb);
- /*
- * In Linux, the IP checksum is always checked.
- * Do L4 checksum offload if enabled and present.
+ /* Incoming packets may have IP header checksum verified by the host.
+ * They may not have IP header checksum computed after coalescing.
+ * We compute it here if the flags are set, because on Linux, the IP
+ * checksum is always checked.
+ */
+ if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ csum_info->receive.ip_checksum_succeeded &&
+ skb->protocol == htons(ETH_P_IP))
+ netvsc_comp_ipcsum(skb);
+
+ /* Do L4 checksum offload if enabled and present.
*/
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 7cdac77d0c68..07e41c42bcf5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
if (!data)
return 0;
+ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct ipvl_dev *tmp = netdev_priv(phy_dev);
phy_dev = tmp->phy_dev;
+ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
} else if (!netif_is_ipvlan_port(phy_dev)) {
/* Exit early if the underlying link is invalid or busy */
if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index da6a67d47ce9..56fa3606cb9c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/delay.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -325,6 +326,8 @@ static int dp83867_phy_reset(struct phy_device *phydev)
if (err < 0)
return err;
+ usleep_range(10, 20);
+
return dp83867_config_init(phydev);
}
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..6bac602094bd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,8 @@
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0
+
enum {
MV_PCS_BASE_T = 0x0000,
MV_PCS_BASE_R = 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
else
reg = 0;
+ /* Make sure we clear unsupported 2.5G/5G advertising */
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G, reg);
+ MDIO_AN_10GBT_CTRL_ADV10G |
+ MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
if (ret < 0)
return ret;
if (ret > 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 66b9cfe692fc..7368616286ae 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
- put_device(&bus->dev);
return -EINVAL;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b1f959935f50..b7df0295a3ca 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -344,6 +344,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -1040,7 +1051,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .config_init = kszphy_config_init,
+ .config_init = ksz8061_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 938803237d7f..85987aac31c4 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -320,6 +320,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ state->an_complete = 0;
state->link = 1;
return pl->ops->mac_link_state(ndev, state);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..cb4a23041a94 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
.name = "RTL8366RB Gigabit Ethernet",
.features = PHY_GBIT_FEATURES,
.config_init = &rtl8366rb_config_init,
+ /* These interrupts are handled by the irq controller
+ * embedded inside the RTL8366RB, they get unmasked when the
+ * irq is requested and ACKed by reading the status register,
+ * which is done by the irqchip code.
+ */
+ .ack_interrupt = genphy_no_ack_interrupt,
+ .config_intr = genphy_no_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782313cf..bd6084e315de 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
u16 val = 0;
int err;
- err = priv->phy_drv->read_status(phydev);
+ if (priv->phy_drv->read_status)
+ err = priv->phy_drv->read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
if (err < 0)
return err;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 958f1cf67282..6ce3f666d142 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1256,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
__team_compute_features(team);
- __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+ __team_port_change_port_added(port, !!netif_oper_up(port_dev));
__team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -2915,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
- if (netif_carrier_ok(dev))
+ if (netif_oper_up(dev))
team_port_change_check(port, true);
break;
case NETDEV_DOWN:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fed298c0cb39..53f4f37b0ffd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2167,9 +2167,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
}
add_wait_queue(&tfile->wq.wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
ptr = ptr_ring_consume(&tfile->tx_ring);
if (ptr)
break;
@@ -2185,7 +2185,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
schedule();
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tfile->wq.wait, &wait);
out:
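
The tun fix above moves the task-state update into the loop: the state must be set to TASK_INTERRUPTIBLE after joining the wait queue and before each re-check of the condition, otherwise a wakeup racing with the check can be lost. Below is a generic kernel-style sketch of that wait loop with invented names and no noblock handling; it mirrors the structure of tun_ring_recv but is not tun code.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>

static int demo_wait_for_item(wait_queue_head_t *wq, bool (*have_item)(void))
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(wq, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);	/* before re-checking */
		if (have_item())
			break;
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		schedule();	/* a wake_up() between the check and here is not lost */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return err;
}
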
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 735ad838e2ba..18af2f8eee96 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 60dd1ec1665f..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -557,6 +557,7 @@ enum spd_duplex {
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
#define BND_MASK 0x0004
+#define BD_MASK 0x0001
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
return -ENODEV;
}
} else {
- /* test for RTL8153-BND */
+ /* test for RTL8153-BND and RTL8153-BD */
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0) {
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
netif_dbg(tp, probe, tp->netdev,
"Invalid variant for MAC pass through\n");
return -ENODEV;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = 0;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 320edcac4699..6359053bd0c7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
- genlmsg_reply(skb, info);
+ res = genlmsg_reply(skb, info);
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0e6b43bb4678..a5ea3ba495a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
.get_txpower = mt76x02_get_txpower,
};
-static int mt76x0u_register_device(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
{
- struct ieee80211_hw *hw = dev->mt76.hw;
int err;
- err = mt76u_alloc_queues(&dev->mt76);
- if (err < 0)
- goto out_err;
-
- err = mt76u_mcu_init_rx(&dev->mt76);
- if (err < 0)
- goto out_err;
-
mt76x0_chip_onoff(dev, true, true);
- if (!mt76x02_wait_for_mac(&dev->mt76)) {
- err = -ETIMEDOUT;
- goto out_err;
- }
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
err = mt76x0u_mcu_init(dev);
if (err < 0)
- goto out_err;
+ return err;
mt76x0_init_usb_dma(dev);
err = mt76x0_init_hardware(dev);
if (err < 0)
- goto out_err;
+ return err;
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+ return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ int err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto out_err;
+
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ goto out_err;
+
+ err = mt76x0u_init_hardware(dev);
+ if (err < 0)
+ goto out_err;
+
err = mt76x0_register_device(dev);
if (err < 0)
goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
mt76u_stop_queues(&dev->mt76);
mt76x0u_mac_stop(dev);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ mt76x0_chip_onoff(dev, false, false);
usb_kill_urb(usb->mcu.res.urb);
return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
tasklet_enable(&usb->rx_tasklet);
tasklet_enable(&usb->tx_tasklet);
- ret = mt76x0_init_hardware(dev);
+ ret = mt76x0u_init_hardware(dev);
if (ret)
goto err;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 0ccb021f1e78..10d580c3dea3 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
if (xenvif_hash_cache_size == 0)
return;
+ BUG_ON(vif->hash.cache.count);
+
spin_lock_init(&vif->hash.cache.lock);
INIT_LIST_HEAD(&vif->hash.cache.list);
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 182d6770f102..6da12518e693 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
+ unsigned int num_queues;
+
+ /* If queues are not set up internally, always return 0
+ * as the packet is going to be dropped anyway */
+ num_queues = READ_ONCE(vif->num_queues);
+ if (num_queues < 1)
+ return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 80aae3a32c2a..f09948b009dd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
skb_frag_size_set(&frags[i], len);
}
- /* Copied all the bits from the frag list -- free it. */
- skb_frag_list_init(skb);
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
-
/* Release all the original (foreign) frags. */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+ xenvif_skb_zerocopy_prepare(queue, nskb);
if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
kfree_skb(skb);
continue;
}
+ /* Copied all the bits from the frag list -- free it. */
+ skb_frag_list_init(skb);
+ kfree_skb(nskb);
}
skb->dev = queue->vif->dev;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index c69ca95b1ad5..0f140a802137 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = {
static const char * const sdxc_a_groups[] = {
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
- "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
};
static const char * const pcm_a_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 7aae52a09ff0..4ffd56ff809e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -79,7 +79,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = NORTH, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 28f55248eb90..753a6a1b30c3 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
@@ -22,12 +21,7 @@
struct pm8607_regulator_info {
struct regulator_desc desc;
- struct pm860x_chip *chip;
- struct regulator_dev *regulator;
- struct i2c_client *i2c;
- struct i2c_client *i2c_8606;
- unsigned int *vol_table;
unsigned int *vol_suspend;
int slope_double;
@@ -210,13 +204,15 @@ static const unsigned int LDO14_suspend_table[] = {
static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- int ret = -EINVAL;
+ int ret;
+
+ ret = regulator_list_voltage_table(rdev, index);
+ if (ret < 0)
+ return ret;
+
+ if (info->slope_double)
+ ret <<= 1;
- if (info->vol_table && (index < rdev->desc->n_voltages)) {
- ret = info->vol_table[index];
- if (info->slope_double)
- ret <<= 1;
- }
return ret;
}
@@ -257,6 +253,7 @@ static const struct regulator_ops pm8606_preg_ops = {
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_##vreg, \
.owner = THIS_MODULE, \
+ .volt_table = vreg##_table, \
.n_voltages = ARRAY_SIZE(vreg##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \
@@ -266,7 +263,6 @@ static const struct regulator_ops pm8606_preg_ops = {
.enable_mask = 1 << (ebit), \
}, \
.slope_double = (0), \
- .vol_table = (unsigned int *)&vreg##_table, \
.vol_suspend = (unsigned int *)&vreg##_suspend_table, \
}
@@ -278,6 +274,7 @@ static const struct regulator_ops pm8606_preg_ops = {
.type = REGULATOR_VOLTAGE, \
.id = PM8607_ID_LDO##_id, \
.owner = THIS_MODULE, \
+ .volt_table = LDO##_id##_table, \
.n_voltages = ARRAY_SIZE(LDO##_id##_table), \
.vsel_reg = PM8607_##vreg, \
.vsel_mask = (ARRAY_SIZE(LDO##_id##_table) - 1) << (shift), \
@@ -285,7 +282,6 @@ static const struct regulator_ops pm8606_preg_ops = {
.enable_mask = 1 << (ebit), \
}, \
.slope_double = (0), \
- .vol_table = (unsigned int *)&LDO##_id##_table, \
.vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \
}
@@ -349,6 +345,7 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
struct pm8607_regulator_info *info = NULL;
struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
+ struct regulator_dev *rdev;
struct resource *res;
int i;
@@ -371,13 +368,9 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
/* i is used to check regulator ID */
i = -1;
}
- info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
- info->i2c_8606 = (chip->id == CHIP_PM8607) ? chip->companion :
- chip->client;
- info->chip = chip;
/* check DVC ramp slope double */
- if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double)
+ if ((i == PM8607_ID_BUCK3) && chip->buck3_double)
info->slope_double = 1;
config.dev = &pdev->dev;
@@ -392,12 +385,11 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
else
config.regmap = chip->regmap_companion;
- info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
- &config);
- if (IS_ERR(info->regulator)) {
+ rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
+ if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
- return PTR_ERR(info->regulator);
+ return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, info);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index ee60a222f5eb..b7f249ee5e68 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -180,6 +180,17 @@ config REGULATOR_BCM590XX
BCM590xx PMUs. This will enable support for the software
controllable LDO/Switching regulators.
+config REGULATOR_BD70528
+ tristate "ROHM BD70528 Power Regulator"
+ depends on MFD_ROHM_BD70528
+ help
+	  This driver supports voltage regulators on the ROHM BD70528 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd70528-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
@@ -457,6 +468,14 @@ config REGULATOR_MAX77620
chip to control Step-Down DC-DC and LDOs. Say Y here to
enable the regulator driver.
+config REGULATOR_MAX77650
+ tristate "Maxim MAX77650/77651 regulator support"
+ depends on MFD_MAX77650
+ help
+ Regulator driver for MAX77650/77651 PMIC from Maxim
+ Semiconductor. This device has a SIMO with three independent
+ power rails and an LDO.
+
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
@@ -484,7 +503,7 @@ config REGULATOR_MAX8925
tristate "Maxim MAX8925 Power Management IC"
depends on MFD_MAX8925
help
- Say y here to support the voltage regulaltor of Maxim MAX8925 PMIC.
+ Say y here to support the voltage regulator of Maxim MAX8925 PMIC.
config REGULATOR_MAX8952
tristate "Maxim MAX8952 Power Management IC"
@@ -501,7 +520,7 @@ config REGULATOR_MAX8973
select REGMAP_I2C
help
The MAXIM MAX8973 high-efficiency. three phase, DC-DC step-down
- switching regulator delievers up to 9A of output current. Each
+ switching regulator delivers up to 9A of output current. Each
phase operates at a 2MHz fixed frequency with a 120 deg shift
from the adjacent phase, allowing the use of small magnetic component.
@@ -646,7 +665,7 @@ config REGULATOR_PCF50633
tristate "NXP PCF50633 regulator driver"
depends on MFD_PCF50633
help
- Say Y here to support the voltage regulators and convertors
+ Say Y here to support the voltage regulators and converters
on PCF50633
config REGULATOR_PFUZE100
@@ -924,7 +943,7 @@ config REGULATOR_TPS65132
select REGMAP_I2C
help
This driver supports TPS65132 single inductor - dual output
- power supply specifcally designed for display panels.
+ power supply specifically designed for display panels.
config REGULATOR_TPS65217
tristate "TI TPS65217 Power regulators"
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b12e1c9b2118..1169f8a27d91 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
+obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
@@ -60,6 +61,7 @@ obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 21e20483bd91..e0239cf3f56d 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -131,7 +131,7 @@
* ACT8865 voltage number
*/
#define ACT8865_VOLTAGE_NUM 64
-#define ACT8600_SUDCDC_VOLTAGE_NUM 255
+#define ACT8600_SUDCDC_VOLTAGE_NUM 256
struct act8865 {
struct regmap *regmap;
@@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
- REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
+ REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
+ REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
static struct regulator_ops act8865_ops = {
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 603db77723b6..caa61d306a69 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -87,7 +87,8 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = {
static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable)
{
struct regmap *regmap = rdev->regmap;
- int id = rdev->desc->id, reg, val;
+ int id = rdev_get_id(rdev);
+ int reg, val;
switch (id) {
case ACT8945A_ID_DCDC1:
@@ -159,7 +160,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
struct regmap *regmap = rdev->regmap;
- int id = rdev->desc->id;
+ int id = rdev_get_id(rdev);
int reg, ret, val = 0;
switch (id) {
@@ -190,11 +191,11 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_STANDBY:
- if (rdev->desc->id > ACT8945A_ID_DCDC3)
+ if (id > ACT8945A_ID_DCDC3)
val = BIT(5);
break;
case REGULATOR_MODE_NORMAL:
- if (rdev->desc->id <= ACT8945A_ID_DCDC3)
+ if (id <= ACT8945A_ID_DCDC3)
val = BIT(5);
break;
default:
@@ -213,7 +214,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned int act8945a_get_mode(struct regulator_dev *rdev)
{
struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
- int id = rdev->desc->id;
+ int id = rdev_get_id(rdev);
if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX)
return -EINVAL;
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index b9a93049e41e..bf3ab405eed1 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -40,35 +40,10 @@ struct arizona_ldo1 {
struct gpio_desc *ena_gpiod;
};
-static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
-
- if (selector == rdev->desc->n_voltages - 1)
- return 1800000;
- else
- return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
-}
-
-static int arizona_ldo1_hc_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int sel;
-
- sel = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
- if (sel >= rdev->desc->n_voltages)
- sel = rdev->desc->n_voltages - 1;
-
- return sel;
-}
-
static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
- struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev);
- struct regmap *regmap = ldo->regmap;
+ struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
@@ -85,16 +60,12 @@ static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
if (val)
return 0;
- val = sel << ARIZONA_LDO1_VSEL_SHIFT;
-
- return regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_1,
- ARIZONA_LDO1_VSEL_MASK, val);
+ return regulator_set_voltage_sel_regmap(rdev, sel);
}
static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
{
- struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev);
- struct regmap *regmap = ldo->regmap;
+ struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
@@ -105,32 +76,35 @@ static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
if (val & ARIZONA_LDO1_HI_PWR)
return rdev->desc->n_voltages - 1;
- ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_1, &val);
- if (ret != 0)
- return ret;
-
- return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT;
+ return regulator_get_voltage_sel_regmap(rdev);
}
static const struct regulator_ops arizona_ldo1_hc_ops = {
- .list_voltage = arizona_ldo1_hc_list_voltage,
- .map_voltage = arizona_ldo1_hc_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
.set_voltage_sel = arizona_ldo1_hc_set_voltage_sel,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
+static const struct regulator_linear_range arizona_ldo1_hc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0),
+};
+
static const struct regulator_desc arizona_ldo1_hc = {
.name = "LDO1",
.supply_name = "LDOVDD",
.type = REGULATOR_VOLTAGE,
.ops = &arizona_ldo1_hc_ops,
+ .vsel_reg = ARIZONA_LDO1_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
.bypass_reg = ARIZONA_LDO1_CONTROL_1,
.bypass_mask = ARIZONA_LDO1_BYPASS,
- .min_uV = 900000,
- .uV_step = 50000,
+ .linear_ranges = arizona_ldo1_hc_ranges,
+ .n_linear_ranges = ARRAY_SIZE(arizona_ldo1_hc_ranges),
.n_voltages = 8,
.enable_time = 1500,
.ramp_delay = 24000,
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 66337e12719b..e5fed289b52d 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -886,7 +886,7 @@ static int as3722_regulator_probe(struct platform_device *pdev)
as3722_regs->desc[id].min_uV = 410000;
} else {
as3722_regs->desc[id].n_voltages =
- AS3722_SD0_VSEL_MAX + 1,
+ AS3722_SD0_VSEL_MAX + 1;
as3722_regs->desc[id].min_uV = 610000;
}
as3722_regs->desc[id].uV_step = 10000;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 48af859fd053..fba8f58ab769 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -367,13 +367,12 @@ static const int axp209_dcdc2_ldo3_slew_rates[] = {
static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc = rdev->desc;
+ const struct regulator_desc *desc;
u8 reg, mask, enable, cfg = 0xff;
const int *slew_rates;
int rate_count = 0;
- if (!rdev)
- return -EINVAL;
+ desc = rdev->desc;
switch (axp20x->variant) {
case AXP209_ID:
@@ -436,11 +435,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev)
{
struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
- const struct regulator_desc *desc = rdev->desc;
+ const struct regulator_desc *desc;
if (!rdev)
return -EINVAL;
+ desc = rdev->desc;
+
switch (axp20x->variant) {
case AXP209_ID:
if ((desc->id == AXP20X_LDO3) &&
@@ -573,7 +574,7 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK),
AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
- AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT,
+ AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK),
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK,
@@ -719,7 +720,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT,
+ AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
@@ -729,7 +730,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
- AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT,
+ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
@@ -744,7 +745,7 @@ static const struct regulator_desc axp803_regulators[] = {
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
- AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT,
+ AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK,
@@ -791,7 +792,7 @@ static const struct regulator_desc axp806_regulators[] = {
AXP806_DCDCA_V_CTRL, AXP806_DCDCA_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCA_MASK),
AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50,
- AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL,
+ AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCB_MASK),
AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc",
axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES,
@@ -817,7 +818,7 @@ static const struct regulator_desc axp806_regulators[] = {
AXP806_BLDO1_V_CTRL, AXP806_BLDO1_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO1_MASK),
AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100,
- AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL,
+ AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL_MASK,
AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO2_MASK),
AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100,
AXP806_BLDO3_V_CTRL, AXP806_BLDO3_V_CTRL_MASK,
@@ -952,7 +953,7 @@ static const struct regulator_desc axp813_regulators[] = {
AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT,
+ AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
@@ -962,7 +963,7 @@ static const struct regulator_desc axp813_regulators[] = {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin",
axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
- AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT,
+ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
@@ -977,7 +978,7 @@ static const struct regulator_desc axp813_regulators[] = {
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
- AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT,
+ AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/* to do / check ... */
AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 92d6d7b10cf7..e49c0a7d5dd5 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -242,8 +242,12 @@ static int bcm590xx_get_enable_register(int id)
case BCM590XX_REG_SDSR2:
reg = BCM590XX_SDSR2PMCTRL1;
break;
+ case BCM590XX_REG_VSR:
+ reg = BCM590XX_VSRPMCTRL1;
+ break;
case BCM590XX_REG_VBUS:
reg = BCM590XX_OTG_CTRL;
+ break;
}
diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c
new file mode 100644
index 000000000000..30e3ed430a8a
--- /dev/null
+++ b/drivers/regulator/bd70528-regulator.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+// bd70528-regulator.c ROHM BD70528MWV regulator driver
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd70528.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+#define BUCK_RAMPRATE_250MV 0
+#define BUCK_RAMPRATE_125MV 1
+#define BUCK_RAMP_MAX 250
+
+static const struct regulator_linear_range bd70528_buck1_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000),
+ REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000),
+};
+static const struct regulator_linear_range bd70528_buck2_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000),
+ REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000),
+ REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000),
+};
+static const struct regulator_linear_range bd70528_buck3_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0),
+};
+
+/* All LDOs have the same voltage ranges */
+static const struct regulator_linear_range bd70528_ldo_volts[] = {
+ REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000),
+ REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000),
+ REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x19, 0x1f, 0),
+};
+
+/* Both LEDs also support the same voltages */
+static const unsigned int led_volts[] = {
+ 20000, 30000
+};
+
+static int bd70528_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ if (ramp_delay > 0 && ramp_delay <= BUCK_RAMP_MAX) {
+ unsigned int ramp_value = BUCK_RAMPRATE_250MV;
+
+ if (ramp_delay <= 125)
+ ramp_value = BUCK_RAMPRATE_125MV;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ BD70528_MASK_BUCK_RAMP,
+ ramp_value << BD70528_SIFT_BUCK_RAMP);
+ }
+ dev_err(&rdev->dev, "%s: ramp_delay: %d not supported\n",
+ rdev->desc->name, ramp_delay);
+ return -EINVAL;
+}
+
+static int bd70528_led_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int ret;
+
+ ret = regulator_is_enabled_regmap(rdev);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+
+ dev_err(&rdev->dev,
+		"LED voltage change not allowed when the LED is enabled\n");
+
+ return -EBUSY;
+}
+
+static const struct regulator_ops bd70528_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd70528_set_ramp_delay,
+};
+
+static const struct regulator_ops bd70528_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd70528_set_ramp_delay,
+};
+
+static const struct regulator_ops bd70528_led_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd70528_led_set_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_desc bd70528_desc[] = {
+ {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_BUCK1,
+ .ops = &bd70528_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_buck1_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_buck1_volts),
+ .n_voltages = BD70528_BUCK_VOLTS,
+ .enable_reg = BD70528_REG_BUCK1_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_BUCK1_VOLT,
+ .vsel_mask = BD70528_MASK_BUCK_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_BUCK2,
+ .ops = &bd70528_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_buck2_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_buck2_volts),
+ .n_voltages = BD70528_BUCK_VOLTS,
+ .enable_reg = BD70528_REG_BUCK2_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_BUCK2_VOLT,
+ .vsel_mask = BD70528_MASK_BUCK_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_BUCK3,
+ .ops = &bd70528_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_buck3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_buck3_volts),
+ .n_voltages = BD70528_BUCK_VOLTS,
+ .enable_reg = BD70528_REG_BUCK3_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_BUCK3_VOLT,
+ .vsel_mask = BD70528_MASK_BUCK_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LDO1,
+ .ops = &bd70528_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
+ .n_voltages = BD70528_LDO_VOLTS,
+ .enable_reg = BD70528_REG_LDO1_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_LDO1_VOLT,
+ .vsel_mask = BD70528_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LDO2,
+ .ops = &bd70528_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
+ .n_voltages = BD70528_LDO_VOLTS,
+ .enable_reg = BD70528_REG_LDO2_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_LDO2_VOLT,
+ .vsel_mask = BD70528_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LDO3,
+ .ops = &bd70528_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd70528_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts),
+ .n_voltages = BD70528_LDO_VOLTS,
+ .enable_reg = BD70528_REG_LDO3_EN,
+ .enable_mask = BD70528_MASK_RUN_EN,
+ .vsel_reg = BD70528_REG_LDO3_VOLT,
+ .vsel_mask = BD70528_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo_led1",
+ .of_match = of_match_ptr("LDO_LED1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LED1,
+ .ops = &bd70528_led_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &led_volts[0],
+ .n_voltages = ARRAY_SIZE(led_volts),
+ .enable_reg = BD70528_REG_LED_EN,
+ .enable_mask = BD70528_MASK_LED1_EN,
+ .vsel_reg = BD70528_REG_LED_VOLT,
+ .vsel_mask = BD70528_MASK_LED1_VOLT,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "ldo_led2",
+ .of_match = of_match_ptr("LDO_LED2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD70528_LED2,
+ .ops = &bd70528_led_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &led_volts[0],
+ .n_voltages = ARRAY_SIZE(led_volts),
+ .enable_reg = BD70528_REG_LED_EN,
+ .enable_mask = BD70528_MASK_LED2_EN,
+ .vsel_reg = BD70528_REG_LED_VOLT,
+ .vsel_mask = BD70528_MASK_LED2_VOLT,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int bd70528_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd70528;
+ int i;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ };
+
+ bd70528 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd70528) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ config.regmap = bd70528->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) {
+ struct regulator_dev *rdev;
+
+ rdev = devm_regulator_register(&pdev->dev, &bd70528_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ bd70528_desc[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver bd70528_regulator = {
+ .driver = {
+ .name = "bd70528-pmic"
+ },
+ .probe = bd70528_probe,
+};
+
+module_platform_driver(bd70528_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD70528 voltage regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index b8dcdc21dc22..b2191be49670 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -79,7 +79,7 @@ static int bd718xx_set_voltage_sel_pickable_restricted(
return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
}
-static struct regulator_ops bd718xx_pickable_range_ldo_ops = {
+static const struct regulator_ops bd718xx_pickable_range_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -88,7 +88,7 @@ static struct regulator_ops bd718xx_pickable_range_ldo_ops = {
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
};
-static struct regulator_ops bd718xx_pickable_range_buck_ops = {
+static const struct regulator_ops bd718xx_pickable_range_buck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -98,7 +98,7 @@ static struct regulator_ops bd718xx_pickable_range_buck_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops bd718xx_ldo_regulator_ops = {
+static const struct regulator_ops bd718xx_ldo_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -107,7 +107,7 @@ static struct regulator_ops bd718xx_ldo_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
+static const struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -116,7 +116,7 @@ static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_ops bd718xx_buck_regulator_ops = {
+static const struct regulator_ops bd718xx_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -126,7 +126,7 @@ static struct regulator_ops bd718xx_buck_regulator_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
+static const struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -137,7 +137,7 @@ static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
+static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -350,6 +350,135 @@ static const struct reg_init bd71837_ldo6_inits[] = {
},
};
+#define NUM_DVS_BUCKS 4
+
+struct of_dvs_setting {
+ const char *prop;
+ unsigned int reg;
+};
+
+static int set_dvs_levels(const struct of_dvs_setting *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap)
+{
+ int ret, i;
+ unsigned int uv;
+
+ ret = of_property_read_u32(np, dvs->prop, &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ return 0;
+ }
+
+ for (i = 0; i < desc->n_voltages; i++) {
+ ret = regulator_desc_list_voltage_linear_range(desc, i);
+ if (ret < 0)
+ continue;
+ if (ret == uv) {
+ i <<= ffs(desc->vsel_mask) - 1;
+ ret = regmap_update_bits(regmap, dvs->reg,
+ DVS_BUCK_RUN_MASK, i);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int buck4_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD71837_REG_BUCK4_VOLT_RUN,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int buck3_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD71837_REG_BUCK3_VOLT_RUN,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int buck2_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD718XX_REG_BUCK2_VOLT_RUN,
+ },
+ {
+ .prop = "rohm,dvs-idle-voltage",
+ .reg = BD718XX_REG_BUCK2_VOLT_IDLE,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int buck1_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ const struct of_dvs_setting dvs[] = {
+ {
+ .prop = "rohm,dvs-run-voltage",
+ .reg = BD718XX_REG_BUCK1_VOLT_RUN,
+ },
+ {
+ .prop = "rohm,dvs-idle-voltage",
+ .reg = BD718XX_REG_BUCK1_VOLT_IDLE,
+ },
+ {
+ .prop = "rohm,dvs-suspend-voltage",
+ .reg = BD718XX_REG_BUCK1_VOLT_SUSP,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dvs); i++) {
+ ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
static const struct bd718xx_regulator_data bd71847_regulators[] = {
{
.desc = {
@@ -368,6 +497,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck1_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK1_CTRL,
@@ -391,6 +521,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck2_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK2_CTRL,
@@ -662,6 +793,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck1_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK1_CTRL,
@@ -685,6 +817,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck2_set_hw_dvs_levels,
},
.init = {
.reg = BD718XX_REG_BUCK2_CTRL,
@@ -708,6 +841,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck3_set_hw_dvs_levels,
},
.init = {
.reg = BD71837_REG_BUCK3_CTRL,
@@ -731,6 +865,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.owner = THIS_MODULE,
+ .of_parse_cb = buck4_set_hw_dvs_levels,
},
.init = {
.reg = BD71837_REG_BUCK4_CTRL,
@@ -1029,6 +1164,7 @@ static int bd718xx_probe(struct platform_device *pdev)
};
int i, j, err;
+ bool use_snvs;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
@@ -1055,27 +1191,28 @@ static int bd718xx_probe(struct platform_device *pdev)
BD718XX_REG_REGLOCK);
}
- /* At poweroff transition PMIC HW disables EN bit for regulators but
- * leaves SEL bit untouched. So if state transition from POWEROFF
- * is done to SNVS - then all power rails controlled by SW (having
- * SEL bit set) stay disabled as EN is cleared. This may result boot
- * failure if any crucial systems are powered by these rails.
- *
+ use_snvs = of_property_read_bool(pdev->dev.parent->of_node,
+ "rohm,reset-snvs-powered");
+
+ /*
* Change the next stage from poweroff to be READY instead of SNVS
* for all reset types because OTP loading at READY will clear SEL
* bit allowing HW defaults for power rails to be used
*/
- err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1,
- BD718XX_ON_REQ_POWEROFF_MASK |
- BD718XX_SWRESET_POWEROFF_MASK |
- BD718XX_WDOG_POWEROFF_MASK |
- BD718XX_KEY_L_POWEROFF_MASK,
- BD718XX_POWOFF_TO_RDY);
- if (err) {
- dev_err(&pdev->dev, "Failed to change reset target\n");
- goto err;
- } else {
- dev_dbg(&pdev->dev, "Changed all resets from SVNS to READY\n");
+ if (!use_snvs) {
+ err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1,
+ BD718XX_ON_REQ_POWEROFF_MASK |
+ BD718XX_SWRESET_POWEROFF_MASK |
+ BD718XX_WDOG_POWEROFF_MASK |
+ BD718XX_KEY_L_POWEROFF_MASK,
+ BD718XX_POWOFF_TO_RDY);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to change reset target\n");
+ goto err;
+ } else {
+ dev_dbg(&pdev->dev,
+				"Changed all resets from SNVS to READY\n");
+ }
}
for (i = 0; i < pmic_regulators[mfd->chip_type].r_amount; i++) {
@@ -1098,19 +1235,33 @@ static int bd718xx_probe(struct platform_device *pdev)
err = PTR_ERR(rdev);
goto err;
}
- /* Regulator register gets the regulator constraints and
+
+ /*
+ * Regulator register gets the regulator constraints and
* applies them (set_machine_constraints). This should have
* turned the control register(s) to correct values and we
* can now switch the control from PMIC state machine to the
* register interface
+ *
+ * At poweroff transition PMIC HW disables EN bit for
+ * regulators but leaves SEL bit untouched. So if state
+ * transition from POWEROFF is done to SNVS - then all power
+ * rails controlled by SW (having SEL bit set) stay disabled
+		 * as EN is cleared. This will result in boot failure if any
+		 * crucial systems are powered by these rails. We don't
+		 * enable SW control for crucial regulators if the SNVS state
+		 * is used.
*/
- err = regmap_update_bits(mfd->regmap, r->init.reg,
- r->init.mask, r->init.val);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to write BUCK/LDO SEL bit for (%s)\n",
- desc->name);
- goto err;
+ if (!use_snvs || !rdev->constraints->always_on ||
+ !rdev->constraints->boot_on) {
+ err = regmap_update_bits(mfd->regmap, r->init.reg,
+ r->init.mask, r->init.val);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to take control for (%s)\n",
+ desc->name);
+ goto err;
+ }
}
for (j = 0; j < r->additional_init_amnt; j++) {
err = regmap_update_bits(mfd->regmap,
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index e12dd1f750f3..e690c2ce5b3c 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -100,7 +100,7 @@ static int bd9571mwv_reg_set_voltage_sel_regmap(struct regulator_dev *rdev,
}
/* Operations permitted on AVS voltage regulator */
-static struct regulator_ops avs_ops = {
+static const struct regulator_ops avs_ops = {
.set_voltage_sel = bd9571mwv_avs_set_voltage_sel_regmap,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = bd9571mwv_avs_get_voltage_sel_regmap,
@@ -108,7 +108,7 @@ static struct regulator_ops avs_ops = {
};
/* Operations permitted on voltage regulators */
-static struct regulator_ops reg_ops = {
+static const struct regulator_ops reg_ops = {
.set_voltage_sel = bd9571mwv_reg_set_voltage_sel_regmap,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -116,13 +116,13 @@ static struct regulator_ops reg_ops = {
};
/* Operations permitted on voltage monitors */
-static struct regulator_ops vid_ops = {
+static const struct regulator_ops vid_ops = {
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
0x80, 600000, 10000, 0x3c),
BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b9d7b45c7295..68473d0cc57e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -23,7 +23,6 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/regmap.h>
@@ -82,7 +81,6 @@ struct regulator_enable_gpio {
struct gpio_desc *gpiod;
u32 enable_count; /* a number of enabled shared GPIO */
u32 request_count; /* a number of requested shared GPIO */
- unsigned int ena_gpio_invert:1;
};
/*
@@ -145,14 +143,6 @@ static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops)
return false;
}
-static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
-{
- if (rdev && rdev->supply)
- return rdev->supply->rdev;
-
- return NULL;
-}
-
/**
* regulator_lock_nested - lock a single regulator
* @rdev: regulator source
@@ -326,7 +316,7 @@ err_unlock:
* @rdev: regulator source
* @ww_ctx: w/w mutex acquire context
*
- * Unlock all regulators related with rdev by coupling or suppling.
+ * Unlock all regulators related with rdev by coupling or supplying.
*/
static void regulator_unlock_dependent(struct regulator_dev *rdev,
struct ww_acquire_ctx *ww_ctx)
@@ -341,7 +331,7 @@ static void regulator_unlock_dependent(struct regulator_dev *rdev,
* @ww_ctx: w/w mutex acquire context
*
* This function as a wrapper on regulator_lock_recursive(), which locks
- * all regulators related with rdev by coupling or suppling.
+ * all regulators related with rdev by coupling or supplying.
*/
static void regulator_lock_dependent(struct regulator_dev *rdev,
struct ww_acquire_ctx *ww_ctx)
@@ -924,14 +914,14 @@ static int drms_uA_update(struct regulator_dev *rdev)
int current_uA = 0, output_uV, input_uV, err;
unsigned int mode;
- lockdep_assert_held_once(&rdev->mutex.base);
-
/*
* first check to see if we can set modes at all, otherwise just
* tell the consumer everything is OK.
*/
- if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) {
+ rdev_dbg(rdev, "DRMS operation not allowed\n");
return 0;
+ }
if (!rdev->desc->ops->get_optimum_mode &&
!rdev->desc->ops->set_load)
@@ -1003,7 +993,7 @@ static int suspend_set_state(struct regulator_dev *rdev,
if (rstate == NULL)
return 0;
- /* If we have no suspend mode configration don't set anything;
+ /* If we have no suspend mode configuration don't set anything;
* only warn if the driver implements set_suspend_voltage or
* set_suspend_mode callback.
*/
@@ -1131,7 +1121,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
int current_uV = _regulator_get_voltage(rdev);
if (current_uV == -ENOTRECOVERABLE) {
- /* This regulator can't be read and must be initted */
+ /* This regulator can't be read and must be initialized */
rdev_info(rdev, "Setting %d-%duV\n",
rdev->constraints->min_uV,
rdev->constraints->max_uV);
@@ -1349,7 +1339,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
* We'll only apply the initial system load if an
* initial mode wasn't specified.
*/
+ regulator_lock(rdev);
drms_uA_update(rdev);
+ regulator_unlock(rdev);
}
if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
@@ -1780,7 +1772,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
struct device *dev = rdev->dev.parent;
int ret;
- /* No supply to resovle? */
+ /* No supply to resolve? */
if (!rdev->supply_name)
return 0;
@@ -2058,15 +2050,7 @@ static void _regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
if (regulator->dev) {
- int count = 0;
- struct regulator *r;
-
- list_for_each_entry(r, &rdev->consumer_list, list)
- if (r->dev == regulator->dev)
- count++;
-
- if (count == 1)
- device_link_remove(regulator->dev, &rdev->dev);
+ device_link_remove(regulator->dev, &rdev->dev);
/* remove any sysfs entries */
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
@@ -2237,38 +2221,21 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
{
struct regulator_enable_gpio *pin;
struct gpio_desc *gpiod;
- int ret;
- if (config->ena_gpiod)
- gpiod = config->ena_gpiod;
- else
- gpiod = gpio_to_desc(config->ena_gpio);
+ gpiod = config->ena_gpiod;
list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
if (pin->gpiod == gpiod) {
- rdev_dbg(rdev, "GPIO %d is already used\n",
- config->ena_gpio);
+ rdev_dbg(rdev, "GPIO is already used\n");
goto update_ena_gpio_to_rdev;
}
}
- if (!config->ena_gpiod) {
- ret = gpio_request_one(config->ena_gpio,
- GPIOF_DIR_OUT | config->ena_gpio_flags,
- rdev_get_name(rdev));
- if (ret)
- return ret;
- }
-
pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
- if (pin == NULL) {
- if (!config->ena_gpiod)
- gpio_free(config->ena_gpio);
+ if (pin == NULL)
return -ENOMEM;
- }
pin->gpiod = gpiod;
- pin->ena_gpio_invert = config->ena_gpio_invert;
list_add(&pin->list, &regulator_ena_gpio_list);
update_ena_gpio_to_rdev:
@@ -2289,7 +2256,6 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
if (pin->gpiod == rdev->ena_pin->gpiod) {
if (pin->request_count <= 1) {
pin->request_count = 0;
- gpiod_put(pin->gpiod);
list_del(&pin->list);
kfree(pin);
rdev->ena_pin = NULL;
@@ -2319,8 +2285,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
if (enable) {
/* Enable GPIO at initial use */
if (pin->enable_count == 0)
- gpiod_set_value_cansleep(pin->gpiod,
- !pin->ena_gpio_invert);
+ gpiod_set_value_cansleep(pin->gpiod, 1);
pin->enable_count++;
} else {
@@ -2331,8 +2296,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
/* Disable GPIO if not used */
if (pin->enable_count <= 1) {
- gpiod_set_value_cansleep(pin->gpiod,
- pin->ena_gpio_invert);
+ gpiod_set_value_cansleep(pin->gpiod, 0);
pin->enable_count = 0;
}
}
@@ -2409,7 +2373,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* timer wrapping.
* in case of multiple timer wrapping, either it can be
* detected by out-of-range remaining, or it cannot be
- * detected and we gets a panelty of
+ * detected and we get a penalty of
* _regulator_enable_delay().
*/
remaining = intended - start_jiffy;
@@ -2809,7 +2773,7 @@ static void regulator_disable_work(struct work_struct *work)
/**
* regulator_disable_deferred - disable regulator output with delay
* @regulator: regulator source
- * @ms: miliseconds until the regulator is disabled
+ * @ms: milliseconds until the regulator is disabled
*
* Execute regulator_disable() on the regulator after a delay. This
* is intended for use with devices that require some time to quiesce.
@@ -4943,7 +4907,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
* device tree until we have handled it over to the core. If the
* config that was passed in to this function DOES NOT contain
* a descriptor, and the config after this call DOES contain
- * a descriptor, we definately got one from parsing the device
+ * a descriptor, we definitely got one from parsing the device
* tree.
*/
if (!cfg->ena_gpiod && config->ena_gpiod)
@@ -4975,15 +4939,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto clean;
}
- if (config->ena_gpiod ||
- ((config->ena_gpio || config->ena_gpio_initialized) &&
- gpio_is_valid(config->ena_gpio))) {
+ if (config->ena_gpiod) {
mutex_lock(&regulator_list_mutex);
ret = regulator_ena_gpio_request(rdev, config);
mutex_unlock(&regulator_list_mutex);
if (ret != 0) {
- rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
- config->ena_gpio, ret);
+ rdev_err(rdev, "Failed to request enable GPIO: %d\n",
+ ret);
goto clean;
}
/* The regulator core took over the GPIO descriptor */
@@ -5251,6 +5213,12 @@ struct device *rdev_get_dev(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(rdev_get_dev);
+struct regmap *rdev_get_regmap(struct regulator_dev *rdev)
+{
+ return rdev->regmap;
+}
+EXPORT_SYMBOL_GPL(rdev_get_regmap);
+
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data)
{
return reg_init_data->driver_data;
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index 2131457937b7..e7dab5c4d1d1 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -100,12 +100,11 @@ struct cpcap_regulator {
struct regulator_desc rdesc;
const u16 assign_reg;
const u16 assign_mask;
- const u16 vsel_shift;
};
#define CPCAP_REG(_ID, reg, assignment_reg, assignment_mask, val_tbl, \
- mode_mask, volt_mask, volt_shft, \
- mode_val, off_val, volt_trans_time) { \
+ mode_mask, volt_mask, mode_val, off_val, \
+ volt_trans_time) { \
.rdesc = { \
.name = #_ID, \
.of_match = of_match_ptr(#_ID), \
@@ -127,7 +126,6 @@ struct cpcap_regulator {
}, \
.assign_reg = (assignment_reg), \
.assign_mask = (assignment_mask), \
- .vsel_shift = (volt_shft), \
}
struct cpcap_ddata {
@@ -336,155 +334,155 @@ static const unsigned int vaudio_val_tbl[] = { 0, 2775000, };
* SW1 to SW4 and SW6 seems to be unused for mapphone. Note that VSIM and
* VSIMCARD have a shared resource assignment bit.
*/
-static struct cpcap_regulator omap4_regulators[] = {
+static const struct cpcap_regulator omap4_regulators[] = {
CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW1_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW2_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW3_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW4_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW5_SEL, sw5_val_tbl,
- 0x28, 0, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0),
+ 0x28, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0),
CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW6_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
- 0x87, 0x30, 4, 0x3, 0, 420),
+ 0x87, 0x30, 0x3, 0, 420),
CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
- 0x47, 0x10, 4, 0x43, 0x41, 350),
+ 0x47, 0x10, 0x43, 0x41, 350),
CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
- 0x87, 0x30, 4, 0x3, 0, 420),
+ 0x87, 0x30, 0x3, 0, 420),
CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
- 0x87, 0x30, 4, 0x82, 0, 420),
+ 0x87, 0x30, 0x82, 0, 420),
CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
- 0x80, 0xf, 0, 0x80, 0, 420),
+ 0x80, 0xf, 0x80, 0, 420),
CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
- 0x17, 0, 0, 0, 0x12, 0),
+ 0x17, 0, 0, 0x12, 0),
CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
- 0x87, 0x38, 3, 0x82, 0, 420),
+ 0x87, 0x38, 0x82, 0, 420),
CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
- 0x43, 0x18, 3, 0x2, 0, 420),
+ 0x43, 0x18, 0x2, 0, 420),
CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
- 0xac, 0x2, 1, 0x4, 0, 10),
+ 0xac, 0x2, 0x4, 0, 10),
CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
- 0x23, 0x8, 3, 0, 0, 10),
+ 0x23, 0x8, 0, 0, 10),
CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
- 0x23, 0x8, 3, 0, 0, 420),
+ 0x23, 0x8, 0, 0, 420),
CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
- 0x47, 0x10, 4, 0, 0, 420),
+ 0x47, 0x10, 0, 0, 420),
CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
- 0x20c, 0xc0, 6, 0x20c, 0, 420),
+ 0x20c, 0xc0, 0x20c, 0, 420),
CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsim_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 420),
+ 0x23, 0x8, 0x3, 0, 420),
CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsimcard_val_tbl,
- 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ 0x1e80, 0x8, 0x1e00, 0, 420),
CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
- 0x1, 0xc, 2, 0x1, 0, 500),
+ 0x1, 0xc, 0x1, 0, 500),
CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
- 0x11c, 0x40, 6, 0xc, 0, 0),
+ 0x11c, 0x40, 0xc, 0, 0),
CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
- 0x16, 0x1, 0, 0x4, 0, 0),
+ 0x16, 0x1, 0x4, 0, 0),
{ /* sentinel */ },
};
-static struct cpcap_regulator xoom_regulators[] = {
+static const struct cpcap_regulator xoom_regulators[] = {
CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW1_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW2_SEL, sw2_sw4_val_tbl,
- 0xf00, 0x7f, 0, 0x800, 0, 120),
+ 0xf00, 0x7f, 0x800, 0, 120),
CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW3_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW4_SEL, sw2_sw4_val_tbl,
- 0xf00, 0x7f, 0, 0x900, 0, 100),
+ 0xf00, 0x7f, 0x900, 0, 100),
CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW5_SEL, sw5_val_tbl,
- 0x2a, 0, 0, 0x22, 0, 0),
+ 0x2a, 0, 0x22, 0, 0),
CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2,
CPCAP_BIT_SW6_SEL, unknown_val_tbl,
- 0, 0, 0, 0, 0, 0),
+ 0, 0, 0, 0, 0),
CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VCAM_SEL, vcam_val_tbl,
- 0x87, 0x30, 4, 0x7, 0, 420),
+ 0x87, 0x30, 0x7, 0, 420),
CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VCSI_SEL, vcsi_val_tbl,
- 0x47, 0x10, 4, 0x7, 0, 350),
+ 0x47, 0x10, 0x7, 0, 350),
CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VDAC_SEL, vdac_val_tbl,
- 0x87, 0x30, 4, 0x3, 0, 420),
+ 0x87, 0x30, 0x3, 0, 420),
CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VDIG_SEL, vdig_val_tbl,
- 0x87, 0x30, 4, 0x5, 0, 420),
+ 0x87, 0x30, 0x5, 0, 420),
CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl,
- 0x80, 0xf, 0, 0x80, 0, 420),
+ 0x80, 0xf, 0x80, 0, 420),
CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl,
- 0x17, 0, 0, 0x2, 0, 0),
+ 0x17, 0, 0x2, 0, 0),
CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2,
CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl,
- 0x87, 0x38, 3, 0x2, 0, 420),
+ 0x87, 0x38, 0x2, 0, 420),
CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VPLL_SEL, vpll_val_tbl,
- 0x43, 0x18, 3, 0x1, 0, 420),
+ 0x43, 0x18, 0x1, 0, 420),
CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF1_SEL, vrf1_val_tbl,
- 0xac, 0x2, 1, 0xc, 0, 10),
+ 0xac, 0x2, 0xc, 0, 10),
CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRF2_SEL, vrf2_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 10),
+ 0x23, 0x8, 0x3, 0, 10),
CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 420),
+ 0x23, 0x8, 0x3, 0, 420),
CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl,
- 0x47, 0x10, 4, 0x5, 0, 420),
+ 0x47, 0x10, 0x5, 0, 420),
CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl,
- 0x20c, 0xc0, 6, 0x8, 0, 420),
+ 0x20c, 0xc0, 0x8, 0, 420),
CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsim_val_tbl,
- 0x23, 0x8, 3, 0x3, 0, 420),
+ 0x23, 0x8, 0x3, 0, 420),
CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3,
0xffff, vsimcard_val_tbl,
- 0x1e80, 0x8, 3, 0x1e00, 0, 420),
+ 0x1e80, 0x8, 0x1e00, 0, 420),
CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VVIB_SEL, vvib_val_tbl,
- 0x1, 0xc, 2, 0, 0x1, 500),
+ 0x1, 0xc, 0, 0x1, 500),
CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3,
CPCAP_BIT_VUSB_SEL, vusb_val_tbl,
- 0x11c, 0x40, 6, 0xc, 0, 0),
+ 0x11c, 0x40, 0xc, 0, 0),
CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4,
CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl,
- 0x16, 0x1, 0, 0x4, 0, 0),
+ 0x16, 0x1, 0x4, 0, 0),
{ /* sentinel */ },
};
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 207cb3859dcc..cefa3558236d 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -290,10 +290,10 @@ static const struct regulator_ops da9052_ldo_ops = {
.disable = regulator_disable_regmap,
};
-#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
+#define DA9052_LDO(_id, _name, step, min, max, sbits, ebits, abits) \
{\
.reg_desc = {\
- .name = #_id,\
+ .name = #_name,\
.ops = &da9052_ldo_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -310,10 +310,10 @@ static const struct regulator_ops da9052_ldo_ops = {
.activate_bit = (abits),\
}
-#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
+#define DA9052_DCDC(_id, _name, step, min, max, sbits, ebits, abits) \
{\
.reg_desc = {\
- .name = #_id,\
+ .name = #_name,\
.ops = &da9052_dcdc_ops,\
.type = REGULATOR_VOLTAGE,\
.id = DA9052_ID_##_id,\
@@ -331,37 +331,37 @@ static const struct regulator_ops da9052_ldo_ops = {
}
static struct da9052_regulator_info da9052_regulator_info[] = {
- DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
- DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
- DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
- DA9052_DCDC(BUCK4, 50, 1800, 3600, 5, 6, 0),
- DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
- DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
- DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
- DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0),
- DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0),
- DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_DCDC(BUCK4, buck4, 50, 1800, 3600, 5, 6, 0),
+ DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0),
};
static struct da9052_regulator_info da9053_regulator_info[] = {
- DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
- DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
- DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
- DA9052_DCDC(BUCK4, 25, 950, 2525, 6, 6, 0),
- DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
- DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
- DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
- DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0),
- DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0),
- DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0),
- DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_DCDC(BUCK4, buck4, 25, 950, 2525, 6, 6, 0),
+ DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0),
};
static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
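For readers skimming the DA9052 macro change, the new _name parameter only affects the name exposed to the regulator core; the driver ID is still derived from the uppercase _id. A rough sketch of what DA9052_LDO(LDO1, ldo1, ...) now expands to (abridged, field order illustrative):

/* Abridged expansion of DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0) */
{
	.reg_desc = {
		.name = "ldo1",			/* from #_name: lowercase, matches DT node names */
		.id   = DA9052_ID_LDO1,		/* still built from the uppercase _id */
		.ops  = &da9052_ldo_ops,
		.type = REGULATOR_VOLTAGE,
		/* voltage/step/enable fields elided */
	},
},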
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 588c3d2445cf..3c6fac793658 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -48,7 +48,9 @@
#define DA9055_ID_LDO6 7
/* DA9055 BUCK current limit */
-static const int da9055_current_limits[] = { 500000, 600000, 700000, 800000 };
+static const unsigned int da9055_current_limits[] = {
+ 500000, 600000, 700000, 800000
+};
struct da9055_conf_reg {
int reg;
@@ -169,39 +171,6 @@ static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
val << volt.sl_shift);
}
-static int da9055_buck_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
- int ret;
-
- ret = da9055_reg_read(regulator->da9055, DA9055_REG_BUCK_LIM);
- if (ret < 0)
- return ret;
-
- ret &= info->mode.mask;
- return da9055_current_limits[ret >> info->mode.shift];
-}
-
-static int da9055_buck_set_current_limit(struct regulator_dev *rdev, int min_uA,
- int max_uA)
-{
- struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
- struct da9055_regulator_info *info = regulator->info;
- int i;
-
- for (i = ARRAY_SIZE(da9055_current_limits) - 1; i >= 0; i--) {
- if ((min_uA <= da9055_current_limits[i]) &&
- (da9055_current_limits[i] <= max_uA))
- return da9055_reg_update(regulator->da9055,
- DA9055_REG_BUCK_LIM,
- info->mode.mask,
- i << info->mode.shift);
- }
-
- return -EINVAL;
-}
-
static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
@@ -329,8 +298,8 @@ static const struct regulator_ops da9055_buck_ops = {
.get_mode = da9055_buck_get_mode,
.set_mode = da9055_buck_set_mode,
- .get_current_limit = da9055_buck_get_current_limit,
- .set_current_limit = da9055_buck_set_current_limit,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
.get_voltage_sel = da9055_regulator_get_voltage_sel,
.set_voltage_sel = da9055_regulator_set_voltage_sel,
@@ -407,6 +376,10 @@ static const struct regulator_ops da9055_ldo_ops = {
.uV_step = (step) * 1000,\
.linear_min_sel = (voffset),\
.owner = THIS_MODULE,\
+ .curr_table = da9055_current_limits,\
+ .n_current_limits = ARRAY_SIZE(da9055_current_limits),\
+ .csel_reg = DA9055_REG_BUCK_LIM,\
+ .csel_mask = (mbits),\
},\
.conf = {\
.reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \
@@ -457,7 +430,6 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
int gpio_mux = pdata->gpio_ren[id];
config->ena_gpiod = pdata->ena_gpiods[id];
- config->ena_gpio_invert = 1;
/*
* GPI pin is muxed with regulator to control the
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 34a70d9dc450..b064d8a19d4c 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -126,7 +126,7 @@ static int da9062_set_current_limit(struct regulator_dev *rdev,
const struct da9062_regulator_info *rinfo = regl->info;
int n, tval;
- for (n = 0; n < rinfo->n_current_limits; n++) {
+ for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
tval = rinfo->current_limits[n];
if (tval >= min_ua && tval <= max_ua)
return regmap_field_write(regl->ilimit, n);
@@ -992,7 +992,6 @@ static int da9062_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
const struct da9062_regulator_info *rinfo;
int irq, n, ret;
- size_t size;
int max_regulators;
switch (chip->chip_type) {
@@ -1010,9 +1009,8 @@ static int da9062_regulator_probe(struct platform_device *pdev)
}
/* Allocate memory required by usable regulators */
- size = sizeof(struct da9062_regulators) +
- max_regulators * sizeof(struct da9062_regulator);
- regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ regulators = devm_kzalloc(&pdev->dev, struct_size(regulators, regulator,
+ max_regulators), GFP_KERNEL);
if (!regulators)
return -ENOMEM;
@@ -1029,31 +1027,50 @@ static int da9062_regulator_probe(struct platform_device *pdev)
regl->desc.type = REGULATOR_VOLTAGE;
regl->desc.owner = THIS_MODULE;
- if (regl->info->mode.reg)
+ if (regl->info->mode.reg) {
regl->mode = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->mode);
- if (regl->info->suspend.reg)
+ if (IS_ERR(regl->mode))
+ return PTR_ERR(regl->mode);
+ }
+
+ if (regl->info->suspend.reg) {
regl->suspend = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->suspend);
- if (regl->info->sleep.reg)
+ if (IS_ERR(regl->suspend))
+ return PTR_ERR(regl->suspend);
+ }
+
+ if (regl->info->sleep.reg) {
regl->sleep = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->sleep);
- if (regl->info->suspend_sleep.reg)
+ if (IS_ERR(regl->sleep))
+ return PTR_ERR(regl->sleep);
+ }
+
+ if (regl->info->suspend_sleep.reg) {
regl->suspend_sleep = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->suspend_sleep);
- if (regl->info->ilimit.reg)
+ if (IS_ERR(regl->suspend_sleep))
+ return PTR_ERR(regl->suspend_sleep);
+ }
+
+ if (regl->info->ilimit.reg) {
regl->ilimit = devm_regmap_field_alloc(
&pdev->dev,
chip->regmap,
regl->info->ilimit);
+ if (IS_ERR(regl->ilimit))
+ return PTR_ERR(regl->ilimit);
+ }
/* Register regulator */
memset(&config, 0, sizeof(config));
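A note on the struct_size() conversion: the helper computes the size of a structure that ends in a flexible array member and saturates instead of wrapping on overflow. A minimal sketch of the equivalence, using the names from the hunk above:

/* What the removed code computed by hand (no overflow protection): */
size_t size = sizeof(struct da9062_regulators) +
	      max_regulators * sizeof(struct da9062_regulator);

/*
 * struct_size() from <linux/overflow.h> yields the same value, but an
 * overflowing multiplication saturates to SIZE_MAX, so the following
 * devm_kzalloc() fails cleanly instead of under-allocating.
 */
size = struct_size(regulators, regulator, max_regulators);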
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 8cbcd2a3eb20..2b0c7a85306a 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -167,7 +167,7 @@ static int da9063_set_current_limit(struct regulator_dev *rdev,
const struct da9063_regulator_info *rinfo = regl->info;
int n, tval;
- for (n = 0; n < rinfo->n_current_limits; n++) {
+ for (n = rinfo->n_current_limits - 1; n >= 0; n--) {
tval = rinfo->current_limits[n];
if (tval >= min_uA && tval <= max_uA)
return regmap_field_write(regl->ilimit, n);
@@ -739,7 +739,6 @@ static int da9063_regulator_probe(struct platform_device *pdev)
struct regulator_config config;
bool bcores_merged, bmem_bio_merged;
int id, irq, n, n_regulators, ret, val;
- size_t size;
regl_pdata = da9063_pdata ? da9063_pdata->regulators_pdata : NULL;
@@ -784,9 +783,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
n_regulators--; /* remove BMEM_BIO_MERGED */
/* Allocate memory required by usable regulators */
- size = sizeof(struct da9063_regulators) +
- n_regulators * sizeof(struct da9063_regulator);
- regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ regulators = devm_kzalloc(&pdev->dev, struct_size(regulators,
+ regulator, n_regulators), GFP_KERNEL);
if (!regulators)
return -ENOMEM;
@@ -835,21 +833,40 @@ static int da9063_regulator_probe(struct platform_device *pdev)
regl->desc.type = REGULATOR_VOLTAGE;
regl->desc.owner = THIS_MODULE;
- if (regl->info->mode.reg)
+ if (regl->info->mode.reg) {
regl->mode = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->mode);
- if (regl->info->suspend.reg)
+ if (IS_ERR(regl->mode))
+ return PTR_ERR(regl->mode);
+ }
+
+ if (regl->info->suspend.reg) {
regl->suspend = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->suspend);
- if (regl->info->sleep.reg)
+ if (IS_ERR(regl->suspend))
+ return PTR_ERR(regl->suspend);
+ }
+
+ if (regl->info->sleep.reg) {
regl->sleep = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->sleep);
- if (regl->info->suspend_sleep.reg)
+ if (IS_ERR(regl->sleep))
+ return PTR_ERR(regl->sleep);
+ }
+
+ if (regl->info->suspend_sleep.reg) {
regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->suspend_sleep);
- if (regl->info->ilimit.reg)
+ if (IS_ERR(regl->suspend_sleep))
+ return PTR_ERR(regl->suspend_sleep);
+ }
+
+ if (regl->info->ilimit.reg) {
regl->ilimit = devm_regmap_field_alloc(&pdev->dev,
da9063->regmap, regl->info->ilimit);
+ if (IS_ERR(regl->ilimit))
+ return PTR_ERR(regl->ilimit);
+ }
/* Register regulator */
memset(&config, 0, sizeof(config));
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 84dba64ed11e..528303771723 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -41,10 +41,6 @@ static const struct regmap_config da9210_regmap_config = {
.val_bits = 8,
};
-static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
- int max_uA);
-static int da9210_get_current_limit(struct regulator_dev *rdev);
-
static const struct regulator_ops da9210_buck_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -52,8 +48,8 @@ static const struct regulator_ops da9210_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = da9210_set_current_limit,
- .get_current_limit = da9210_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
/* Default limits measured in millivolts and milliamps */
@@ -62,7 +58,7 @@ static const struct regulator_ops da9210_buck_ops = {
#define DA9210_STEP_MV 10
/* Current limits for buck (uA); indices correspond to register values */
-static const int da9210_buck_limits[] = {
+static const unsigned int da9210_buck_limits[] = {
1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000,
3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000
};
@@ -80,47 +76,12 @@ static const struct regulator_desc da9210_reg = {
.enable_reg = DA9210_REG_BUCK_CONT,
.enable_mask = DA9210_BUCK_EN,
.owner = THIS_MODULE,
+ .curr_table = da9210_buck_limits,
+ .n_current_limits = ARRAY_SIZE(da9210_buck_limits),
+ .csel_reg = DA9210_REG_BUCK_ILIM,
+ .csel_mask = DA9210_BUCK_ILIM_MASK,
};
-static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
- int max_uA)
-{
- struct da9210 *chip = rdev_get_drvdata(rdev);
- unsigned int sel;
- int i;
-
- /* search for closest to maximum */
- for (i = ARRAY_SIZE(da9210_buck_limits)-1; i >= 0; i--) {
- if (min_uA <= da9210_buck_limits[i] &&
- max_uA >= da9210_buck_limits[i]) {
- sel = i;
- sel = sel << DA9210_BUCK_ILIM_SHIFT;
- return regmap_update_bits(chip->regmap,
- DA9210_REG_BUCK_ILIM,
- DA9210_BUCK_ILIM_MASK, sel);
- }
- }
-
- return -EINVAL;
-}
-
-static int da9210_get_current_limit(struct regulator_dev *rdev)
-{
- struct da9210 *chip = rdev_get_drvdata(rdev);
- unsigned int data;
- unsigned int sel;
- int ret;
-
- ret = regmap_read(chip->regmap, DA9210_REG_BUCK_ILIM, &data);
- if (ret < 0)
- return ret;
-
- /* select one of 16 values: 0000 (1600mA) to 1111 (4600mA) */
- sel = (data & DA9210_BUCK_ILIM_MASK) >> DA9210_BUCK_ILIM_SHIFT;
-
- return da9210_buck_limits[sel];
-}
-
static irqreturn_t da9210_irq_handler(int irq, void *data)
{
struct da9210 *chip = data;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index a3bc8037153e..771a06d1900d 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -40,7 +40,6 @@
/* VSEL bit definitions */
#define VSEL_BUCK_EN (1 << 7)
#define VSEL_MODE (1 << 6)
-#define VSEL_NSEL_MASK 0x3F
/* Chip ID and Version */
#define DIE_ID 0x0F /* ID1 */
#define DIE_REV 0x0F /* ID2 */
@@ -49,14 +48,26 @@
#define CTL_SLEW_MASK (0x7 << 4)
#define CTL_SLEW_SHIFT 4
#define CTL_RESET (1 << 2)
+#define CTL_MODE_VSEL0_MODE BIT(0)
+#define CTL_MODE_VSEL1_MODE BIT(1)
#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */
+#define FAN53526_NVOLTAGES 128
enum fan53555_vendor {
- FAN53555_VENDOR_FAIRCHILD = 0,
+ FAN53526_VENDOR_FAIRCHILD = 0,
+ FAN53555_VENDOR_FAIRCHILD,
FAN53555_VENDOR_SILERGY,
};
+enum {
+ FAN53526_CHIP_ID_01 = 1,
+};
+
+enum {
+ FAN53526_CHIP_REV_08 = 8,
+};
+
/* IC Type */
enum {
FAN53555_CHIP_ID_00 = 0,
@@ -94,8 +105,12 @@ struct fan53555_device_info {
/* Voltage range and step(linear) */
unsigned int vsel_min;
unsigned int vsel_step;
+ unsigned int vsel_count;
/* Voltage slew rate limiting */
unsigned int slew_rate;
+ /* Mode */
+ unsigned int mode_reg;
+ unsigned int mode_mask;
/* Sleep voltage cache */
unsigned int sleep_vol_cache;
};
@@ -111,7 +126,7 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
if (ret < 0)
return ret;
ret = regmap_update_bits(di->regmap, di->sleep_reg,
- VSEL_NSEL_MASK, ret);
+ di->desc.vsel_mask, ret);
if (ret < 0)
return ret;
/* Cache the sleep voltage setting.
@@ -143,11 +158,11 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- regmap_update_bits(di->regmap, di->vol_reg,
- VSEL_MODE, VSEL_MODE);
+ regmap_update_bits(di->regmap, di->mode_reg,
+ di->mode_mask, di->mode_mask);
break;
case REGULATOR_MODE_NORMAL:
- regmap_update_bits(di->regmap, di->vol_reg, VSEL_MODE, 0);
+ regmap_update_bits(di->regmap, di->mode_reg, di->mode_mask, 0);
break;
default:
return -EINVAL;
@@ -161,10 +176,10 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
unsigned int val;
int ret = 0;
- ret = regmap_read(di->regmap, di->vol_reg, &val);
+ ret = regmap_read(di->regmap, di->mode_reg, &val);
if (ret < 0)
return ret;
- if (val & VSEL_MODE)
+ if (val & di->mode_mask)
return REGULATOR_MODE_FAST;
else
return REGULATOR_MODE_NORMAL;
@@ -219,6 +234,34 @@ static const struct regulator_ops fan53555_regulator_ops = {
.set_suspend_disable = fan53555_set_suspend_disable,
};
+static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
+{
+ /* Init voltage range and step */
+ switch (di->chip_id) {
+ case FAN53526_CHIP_ID_01:
+ switch (di->chip_rev) {
+ case FAN53526_CHIP_REV_08:
+ di->vsel_min = 600000;
+ di->vsel_step = 6250;
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d with rev %d not supported!\n",
+ di->chip_id, di->chip_rev);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d not supported!\n", di->chip_id);
+ return -EINVAL;
+ }
+
+ di->vsel_count = FAN53526_NVOLTAGES;
+
+ return 0;
+}
+
static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
{
/* Init voltage range and step */
@@ -257,6 +300,8 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
return -EINVAL;
}
+ di->vsel_count = FAN53555_NVOLTAGES;
+
return 0;
}
@@ -274,6 +319,8 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
return -EINVAL;
}
+ di->vsel_count = FAN53555_NVOLTAGES;
+
return 0;
}
@@ -302,7 +349,35 @@ static int fan53555_device_setup(struct fan53555_device_info *di,
return -EINVAL;
}
+ /* Setup mode control register */
+ switch (di->vendor) {
+ case FAN53526_VENDOR_FAIRCHILD:
+ di->mode_reg = FAN53555_CONTROL;
+
+ switch (pdata->sleep_vsel_id) {
+ case FAN53555_VSEL_ID_0:
+ di->mode_mask = CTL_MODE_VSEL1_MODE;
+ break;
+ case FAN53555_VSEL_ID_1:
+ di->mode_mask = CTL_MODE_VSEL0_MODE;
+ break;
+ }
+ break;
+ case FAN53555_VENDOR_FAIRCHILD:
+ case FAN53555_VENDOR_SILERGY:
+ di->mode_reg = di->vol_reg;
+ di->mode_mask = VSEL_MODE;
+ break;
+ default:
+ dev_err(di->dev, "vendor %d not supported!\n", di->vendor);
+ return -EINVAL;
+ }
+
+ /* Setup voltage range */
switch (di->vendor) {
+ case FAN53526_VENDOR_FAIRCHILD:
+ ret = fan53526_voltages_setup_fairchild(di);
+ break;
case FAN53555_VENDOR_FAIRCHILD:
ret = fan53555_voltages_setup_fairchild(di);
break;
@@ -326,13 +401,13 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->supply_name = "vin";
rdesc->ops = &fan53555_regulator_ops;
rdesc->type = REGULATOR_VOLTAGE;
- rdesc->n_voltages = FAN53555_NVOLTAGES;
+ rdesc->n_voltages = di->vsel_count;
rdesc->enable_reg = di->vol_reg;
rdesc->enable_mask = VSEL_BUCK_EN;
rdesc->min_uV = di->vsel_min;
rdesc->uV_step = di->vsel_step;
rdesc->vsel_reg = di->vol_reg;
- rdesc->vsel_mask = VSEL_NSEL_MASK;
+ rdesc->vsel_mask = di->vsel_count - 1;
rdesc->owner = THIS_MODULE;
di->rdev = devm_regulator_register(di->dev, &di->desc, config);
@@ -368,6 +443,9 @@ static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
static const struct of_device_id fan53555_dt_ids[] = {
{
+ .compatible = "fcs,fan53526",
+ .data = (void *)FAN53526_VENDOR_FAIRCHILD,
+ }, {
.compatible = "fcs,fan53555",
.data = (void *)FAN53555_VENDOR_FAIRCHILD
}, {
@@ -412,11 +490,13 @@ static int fan53555_regulator_probe(struct i2c_client *client,
} else {
/* if no ramp constraint set, get the pdata ramp_delay */
if (!di->regulator->constraints.ramp_delay) {
- int slew_idx = (pdata->slew_rate & 0x7)
- ? pdata->slew_rate : 0;
+ if (pdata->slew_rate >= ARRAY_SIZE(slew_rates)) {
+ dev_err(&client->dev, "Invalid slew_rate\n");
+ return -EINVAL;
+ }
di->regulator->constraints.ramp_delay
- = slew_rates[slew_idx];
+ = slew_rates[pdata->slew_rate];
}
di->vendor = id->driver_data;
@@ -467,6 +547,9 @@ static int fan53555_regulator_probe(struct i2c_client *client,
static const struct i2c_device_id fan53555_id[] = {
{
+ .name = "fan53526",
+ .driver_data = FAN53526_VENDOR_FAIRCHILD
+ }, {
.name = "fan53555",
.driver_data = FAN53555_VENDOR_FAIRCHILD
}, {
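One detail in the fan53555 rework worth spelling out: deriving vsel_mask from the selector count only works because both counts are powers of two. A short sketch (the BUILD_BUG_ON guards are hypothetical additions, not part of the driver):

/* 64 selectors  -> vsel_mask = 0x3f, the old VSEL_NSEL_MASK */
/* 128 selectors -> vsel_mask = 0x7f, used for the FAN53526 */
rdesc->vsel_mask = di->vsel_count - 1;

/* Hypothetical compile-time guards for the power-of-two assumption. */
BUILD_BUG_ON(FAN53555_NVOLTAGES & (FAN53555_NVOLTAGES - 1));
BUILD_BUG_ON(FAN53526_NVOLTAGES & (FAN53526_NVOLTAGES - 1));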
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 9abdb9130766..b5afc9db2c61 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -79,15 +79,6 @@ of_get_fixed_voltage_config(struct device *dev,
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
- /*
- * FIXME: we pulled active low/high and open drain handling into
- * gpiolib so it will be handled there. Delete this in the second
- * step when we also remove the custom inversion handling for all
- * legacy boardfiles.
- */
- config->enable_high = 1;
- config->gpio_is_open_drain = 0;
-
if (of_find_property(np, "vin-supply", NULL))
config->input_supply = "vin";
@@ -151,24 +142,14 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.fixed_uV = config->microvolts;
- cfg.ena_gpio_invert = !config->enable_high;
- if (config->enabled_at_boot) {
- if (config->enable_high)
- gflags = GPIOD_OUT_HIGH;
- else
- gflags = GPIOD_OUT_LOW;
- } else {
- if (config->enable_high)
- gflags = GPIOD_OUT_LOW;
- else
- gflags = GPIOD_OUT_HIGH;
- }
- if (config->gpio_is_open_drain) {
- if (gflags == GPIOD_OUT_HIGH)
- gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
- else
- gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
- }
+ /*
+ * The signal will be inverted by the GPIO core if flagged so in the
+ * descriptor.
+ */
+ if (config->enabled_at_boot)
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_LOW;
/*
* Some fixed regulators share the enable line between two
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index b2f5ec4f658a..6157001df0a4 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -30,16 +30,15 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/gpio-regulator.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
struct gpio_regulator_data {
struct regulator_desc desc;
struct regulator_dev *dev;
- struct gpio *gpios;
+ struct gpio_desc **gpiods;
int nr_gpios;
struct gpio_regulator_state *states;
@@ -82,7 +81,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
for (ptr = 0; ptr < data->nr_gpios; ptr++) {
state = (target & (1 << ptr)) >> ptr;
- gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
+ gpiod_set_value_cansleep(data->gpiods[ptr], state);
}
data->state = target;
@@ -119,7 +118,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
for (ptr = 0; ptr < data->nr_gpios; ptr++) {
state = (target & (1 << ptr)) >> ptr;
- gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
+ gpiod_set_value_cansleep(data->gpiods[ptr], state);
}
data->state = target;
@@ -138,7 +137,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
{
struct gpio_regulator_config *config;
const char *regtype;
- int proplen, gpio, i;
+ int proplen, i;
+ int ngpios;
int ret;
config = devm_kzalloc(dev,
@@ -153,59 +153,36 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
config->supply_name = config->init_data->constraints.name;
- if (of_property_read_bool(np, "enable-active-high"))
- config->enable_high = true;
-
if (of_property_read_bool(np, "enable-at-boot"))
config->enabled_at_boot = true;
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
- config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
- if (config->enable_gpio < 0 && config->enable_gpio != -ENOENT)
- return ERR_PTR(config->enable_gpio);
-
- /* Fetch GPIOs. - optional property*/
- ret = of_gpio_count(np);
- if ((ret < 0) && (ret != -ENOENT))
- return ERR_PTR(ret);
-
- if (ret > 0) {
- config->nr_gpios = ret;
- config->gpios = devm_kcalloc(dev,
- config->nr_gpios, sizeof(struct gpio),
- GFP_KERNEL);
- if (!config->gpios)
+ /* Fetch GPIO init levels */
+ ngpios = gpiod_count(dev, NULL);
+ if (ngpios > 0) {
+ config->gflags = devm_kzalloc(dev,
+ sizeof(enum gpiod_flags)
+ * ngpios,
+ GFP_KERNEL);
+ if (!config->gflags)
return ERR_PTR(-ENOMEM);
- proplen = of_property_count_u32_elems(np, "gpios-states");
- /* optional property */
- if (proplen < 0)
- proplen = 0;
+ for (i = 0; i < ngpios; i++) {
+ u32 val;
- if (proplen > 0 && proplen != config->nr_gpios) {
- dev_warn(dev, "gpios <-> gpios-states mismatch\n");
- proplen = 0;
- }
+ ret = of_property_read_u32_index(np, "gpios-states", i,
+ &val);
- for (i = 0; i < config->nr_gpios; i++) {
- gpio = of_get_named_gpio(np, "gpios", i);
- if (gpio < 0) {
- if (gpio != -ENOENT)
- return ERR_PTR(gpio);
- break;
- }
- config->gpios[i].gpio = gpio;
- config->gpios[i].label = config->supply_name;
- if (proplen > 0) {
- of_property_read_u32_index(np, "gpios-states",
- i, &ret);
- if (ret)
- config->gpios[i].flags =
- GPIOF_OUT_INIT_HIGH;
- }
+ /* Default to high per specification */
+ if (ret)
+ config->gflags[i] = GPIOD_OUT_HIGH;
+ else
+ config->gflags[i] =
+ val ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
}
}
+ config->ngpios = ngpios;
/* Fetch states. */
proplen = of_property_count_u32_elems(np, "states");
@@ -251,59 +228,56 @@ static struct regulator_ops gpio_regulator_current_ops = {
static int gpio_regulator_probe(struct platform_device *pdev)
{
- struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct gpio_regulator_config *config = dev_get_platdata(dev);
+ struct device_node *np = dev->of_node;
struct gpio_regulator_data *drvdata;
struct regulator_config cfg = { };
- int ptr, ret, state;
+ enum gpiod_flags gflags;
+ int ptr, ret, state, i;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
+ drvdata = devm_kzalloc(dev, sizeof(struct gpio_regulator_data),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
if (np) {
- config = of_get_gpio_regulator_config(&pdev->dev, np,
+ config = of_get_gpio_regulator_config(dev, np,
&drvdata->desc);
if (IS_ERR(config))
return PTR_ERR(config);
}
- drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+ drvdata->desc.name = devm_kstrdup(dev, config->supply_name, GFP_KERNEL);
if (drvdata->desc.name == NULL) {
- dev_err(&pdev->dev, "Failed to allocate supply name\n");
+ dev_err(dev, "Failed to allocate supply name\n");
return -ENOMEM;
}
- if (config->nr_gpios != 0) {
- drvdata->gpios = kmemdup(config->gpios,
- config->nr_gpios * sizeof(struct gpio),
- GFP_KERNEL);
- if (drvdata->gpios == NULL) {
- dev_err(&pdev->dev, "Failed to allocate gpio data\n");
- ret = -ENOMEM;
- goto err_name;
- }
-
- drvdata->nr_gpios = config->nr_gpios;
- ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Could not obtain regulator setting GPIOs: %d\n",
- ret);
- goto err_memgpio;
- }
+ drvdata->gpiods = devm_kcalloc(dev, config->ngpios,
+ sizeof(struct gpio_desc *), GFP_KERNEL);
+ if (!drvdata->gpiods)
+ return -ENOMEM;
+ for (i = 0; i < config->ngpios; i++) {
+ drvdata->gpiods[i] = devm_gpiod_get_index(dev,
+ NULL,
+ i,
+ config->gflags[i]);
+ if (IS_ERR(drvdata->gpiods[i]))
+ return PTR_ERR(drvdata->gpiods[i]);
+ /* Label the line with the supply name for easier diagnostics */
+ gpiod_set_consumer_name(drvdata->gpiods[i], drvdata->desc.name);
}
+ drvdata->nr_gpios = config->ngpios;
- drvdata->states = kmemdup(config->states,
- config->nr_states *
- sizeof(struct gpio_regulator_state),
- GFP_KERNEL);
+ drvdata->states = devm_kmemdup(dev,
+ config->states,
+ config->nr_states *
+ sizeof(struct gpio_regulator_state),
+ GFP_KERNEL);
if (drvdata->states == NULL) {
- dev_err(&pdev->dev, "Failed to allocate state data\n");
- ret = -ENOMEM;
- goto err_stategpio;
+ dev_err(dev, "Failed to allocate state data\n");
+ return -ENOMEM;
}
drvdata->nr_states = config->nr_states;
@@ -322,61 +296,46 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata->desc.ops = &gpio_regulator_current_ops;
break;
default:
- dev_err(&pdev->dev, "No regulator type set\n");
- ret = -EINVAL;
- goto err_memstate;
+ dev_err(dev, "No regulator type set\n");
+ return -EINVAL;
}
/* build initial state from gpio init data. */
state = 0;
for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) {
- if (config->gpios[ptr].flags & GPIOF_OUT_INIT_HIGH)
+ if (config->gflags[ptr] == GPIOD_OUT_HIGH)
state |= (1 << ptr);
}
drvdata->state = state;
- cfg.dev = &pdev->dev;
+ cfg.dev = dev;
cfg.init_data = config->init_data;
cfg.driver_data = drvdata;
cfg.of_node = np;
- if (gpio_is_valid(config->enable_gpio)) {
- cfg.ena_gpio = config->enable_gpio;
- cfg.ena_gpio_initialized = true;
- }
- cfg.ena_gpio_invert = !config->enable_high;
- if (config->enabled_at_boot) {
- if (config->enable_high)
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- else
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- } else {
- if (config->enable_high)
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
- else
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
- }
+ /*
+ * The signal will be inverted by the GPIO core if flagged so in the
+ * descriptor.
+ */
+ if (config->enabled_at_boot)
+ gflags = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
+ else
+ gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
+
+ cfg.ena_gpiod = gpiod_get_optional(dev, "enable", gflags);
+ if (IS_ERR(cfg.ena_gpiod))
+ return PTR_ERR(cfg.ena_gpiod);
drvdata->dev = regulator_register(&drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
- dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- goto err_memstate;
+ dev_err(dev, "Failed to register regulator: %d\n", ret);
+ return ret;
}
platform_set_drvdata(pdev, drvdata);
return 0;
-
-err_memstate:
- kfree(drvdata->states);
-err_stategpio:
- gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
-err_memgpio:
- kfree(drvdata->gpios);
-err_name:
- kfree(drvdata->desc.name);
- return ret;
}
static int gpio_regulator_remove(struct platform_device *pdev)
@@ -385,13 +344,6 @@ static int gpio_regulator_remove(struct platform_device *pdev)
regulator_unregister(drvdata->dev);
- gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
-
- kfree(drvdata->states);
- kfree(drvdata->gpios);
-
- kfree(drvdata->desc.name);
-
return 0;
}
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 5686a1335bd3..32d3f0499e2d 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -594,28 +594,30 @@ int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
EXPORT_SYMBOL_GPL(regulator_list_voltage_pickable_linear_range);
/**
- * regulator_list_voltage_linear_range - List voltages for linear ranges
+ * regulator_desc_list_voltage_linear_range - List voltages for linear ranges
*
- * @rdev: Regulator device
+ * @desc: Regulator desc for the regulator whose voltages are to be listed
* @selector: Selector to convert into a voltage
*
* Regulators with a series of simple linear mappings between voltages
- * and selectors can set linear_ranges in the regulator descriptor and
- * then use this function as their list_voltage() operation,
+ * and selectors which have set linear_ranges in the regulator descriptor
+ * can use this function prior to regulator registration to list voltages.
+ * This is useful when voltages need to be listed during device-tree
+ * parsing.
*/
-int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
- unsigned int selector)
+int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
+ unsigned int selector)
{
const struct regulator_linear_range *range;
int i;
- if (!rdev->desc->n_linear_ranges) {
- BUG_ON(!rdev->desc->n_linear_ranges);
+ if (!desc->n_linear_ranges) {
+ BUG_ON(!desc->n_linear_ranges);
return -EINVAL;
}
- for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
- range = &rdev->desc->linear_ranges[i];
+ for (i = 0; i < desc->n_linear_ranges; i++) {
+ range = &desc->linear_ranges[i];
if (!(selector >= range->min_sel &&
selector <= range->max_sel))
@@ -628,6 +630,23 @@ int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(regulator_desc_list_voltage_linear_range);
+
+/**
+ * regulator_list_voltage_linear_range - List voltages for linear ranges
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a series of simple linear mappings between voltages
+ * and selectors can set linear_ranges in the regulator descriptor and
+ * then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ return regulator_desc_list_voltage_linear_range(rdev->desc, selector);
+}
EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range);
/**
@@ -761,3 +780,89 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
rdev->desc->active_discharge_mask, val);
}
EXPORT_SYMBOL_GPL(regulator_set_active_discharge_regmap);
+
+/**
+ * regulator_set_current_limit_regmap - set_current_limit for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @min_uA: Lower bound for current limit
+ * @max_uA: Upper bound for current limit
+ *
+ * Regulators that use regmap for their register I/O can set curr_table,
+ * csel_reg and csel_mask fields in their descriptor and then use this
+ * as their set_current_limit operation, saving some code.
+ */
+int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ unsigned int n_currents = rdev->desc->n_current_limits;
+ int i, sel = -1;
+
+ if (n_currents == 0)
+ return -EINVAL;
+
+ if (rdev->desc->curr_table) {
+ const unsigned int *curr_table = rdev->desc->curr_table;
+ bool ascend = curr_table[n_currents - 1] > curr_table[0];
+
+ /* search for closest to maximum */
+ if (ascend) {
+ for (i = n_currents - 1; i >= 0; i--) {
+ if (min_uA <= curr_table[i] &&
+ curr_table[i] <= max_uA) {
+ sel = i;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < n_currents; i++) {
+ if (min_uA <= curr_table[i] &&
+ curr_table[i] <= max_uA) {
+ sel = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (sel < 0)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+ rdev->desc->csel_mask, sel);
+}
+EXPORT_SYMBOL_GPL(regulator_set_current_limit_regmap);
+
+/**
+ * regulator_get_current_limit_regmap - get_current_limit for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * csel_reg and csel_mask fields in their descriptor and then use this
+ * as their get_current_limit operation, saving some code.
+ */
+int regulator_get_current_limit_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->csel_mask;
+ val >>= ffs(rdev->desc->csel_mask) - 1;
+
+ if (rdev->desc->curr_table) {
+ if (val >= rdev->desc->n_current_limits)
+ return -EINVAL;
+
+ return rdev->desc->curr_table[val];
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_get_current_limit_regmap);
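To make the intended use of the two regmap current-limit helpers concrete, here is a minimal, hypothetical descriptor wiring; the foo_* names and registers are invented for illustration, while the actual conversions in this series are da9055, da9210, lp872x and lp873x:

static const unsigned int foo_buck_limits[] = {
	500000, 1000000, 1500000, 2000000,	/* uA, index == value of the csel field */
};

static const struct regulator_ops foo_buck_ops = {
	.set_current_limit	= regulator_set_current_limit_regmap,
	.get_current_limit	= regulator_get_current_limit_regmap,
	/* voltage and enable ops elided */
};

static const struct regulator_desc foo_buck_desc = {
	.name			= "foo-buck",
	.type			= REGULATOR_VOLTAGE,
	.owner			= THIS_MODULE,
	.ops			= &foo_buck_ops,
	.curr_table		= foo_buck_limits,
	.n_current_limits	= ARRAY_SIZE(foo_buck_limits),
	.csel_reg		= FOO_REG_BUCK_ILIM,	/* hypothetical register */
	.csel_mask		= FOO_BUCK_ILIM_MASK,	/* field selecting the table index */
};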
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
index 36ae54b53814..bba24a6fdb1e 100644
--- a/drivers/regulator/hi655x-regulator.c
+++ b/drivers/regulator/hi655x-regulator.c
@@ -28,7 +28,6 @@
struct hi655x_regulator {
unsigned int disable_reg;
unsigned int status_reg;
- unsigned int ctrl_regs;
unsigned int ctrl_mask;
struct regulator_desc rdesc;
};
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 4abd8e9c81e5..6f28bba81d13 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -31,7 +31,6 @@
/* PMIC details */
struct isl_pmic {
struct i2c_client *client;
- struct regulator_dev *rdev[3];
struct mutex mtx;
};
@@ -66,14 +65,14 @@ static int isl6271a_set_voltage_sel(struct regulator_dev *dev,
return err;
}
-static struct regulator_ops isl_core_ops = {
+static const struct regulator_ops isl_core_ops = {
.get_voltage_sel = isl6271a_get_voltage_sel,
.set_voltage_sel = isl6271a_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
};
-static struct regulator_ops isl_fixed_ops = {
+static const struct regulator_ops isl_fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
};
@@ -109,6 +108,7 @@ static const struct regulator_desc isl_rd[] = {
static int isl6271a_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ struct regulator_dev *rdev;
struct regulator_config config = { };
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct isl_pmic *pmic;
@@ -133,11 +133,10 @@ static int isl6271a_probe(struct i2c_client *i2c,
config.init_data = NULL;
config.driver_data = pmic;
- pmic->rdev[i] = devm_regulator_register(&i2c->dev, &isl_rd[i],
- &config);
- if (IS_ERR(pmic->rdev[i])) {
+ rdev = devm_regulator_register(&i2c->dev, &isl_rd[i], &config);
+ if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
- return PTR_ERR(pmic->rdev[i]);
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index 8c0e8419c43f..c876e161052a 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -258,6 +258,9 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
* Register update is required if the pin is used.
*/
gpiod = lm363x_regulator_of_get_enable_gpio(dev, id);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
if (gpiod) {
cfg.ena_gpiod = gpiod;
@@ -265,8 +268,7 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
LM3632_EXT_EN_MASK,
LM3632_EXT_EN_MASK);
if (ret) {
- if (gpiod)
- gpiod_put(gpiod);
+ gpiod_put(gpiod);
dev_err(dev, "External pin err: %d\n", ret);
return ret;
}
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
index 5a89e6d4b9a6..ff97cc50f2eb 100644
--- a/drivers/regulator/lochnagar-regulator.c
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -194,7 +194,7 @@ static const struct regulator_desc lochnagar_regulators[] = {
.name = "VDDCORE",
.supply_name = "SYSVDD",
.type = REGULATOR_VOLTAGE,
- .n_voltages = 57,
+ .n_voltages = 66,
.ops = &lochnagar_vddcore_ops,
.id = LOCHNAGAR_VDDCORE,
@@ -226,14 +226,15 @@ static const struct of_device_id lochnagar_of_match[] = {
},
{
.compatible = "cirrus,lochnagar2-mic2vdd",
- .data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD],
+ .data = &lochnagar_regulators[LOCHNAGAR_MIC2VDD],
},
{
.compatible = "cirrus,lochnagar2-vddcore",
.data = &lochnagar_regulators[LOCHNAGAR_VDDCORE],
},
- {},
+ {}
};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
static int lochnagar_regulator_probe(struct platform_device *pdev)
{
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 204b5c5270e0..9e45112658ba 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -159,7 +159,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
selector << LDO_VOL_CONTR_SHIFT(ldo));
}
-static struct regulator_ops lp3971_ldo_ops = {
+static const struct regulator_ops lp3971_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3971_ldo_is_enabled,
@@ -233,7 +233,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
0 << BUCK_VOL_CHANGE_SHIFT(buck));
}
-static struct regulator_ops lp3971_dcdc_ops = {
+static const struct regulator_ops lp3971_dcdc_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3971_dcdc_is_enabled,
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index ff0c275f902e..fb098198b688 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -305,7 +305,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
return ret;
}
-static struct regulator_ops lp3972_ldo_ops = {
+static const struct regulator_ops lp3972_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3972_ldo_is_enabled,
@@ -386,7 +386,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
LP3972_VOL_CHANGE_FLAG_MASK, 0);
}
-static struct regulator_ops lp3972_dcdc_ops = {
+static const struct regulator_ops lp3972_dcdc_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3972_dcdc_is_enabled,
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 38992112fd6e..ca95257ce252 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -353,64 +353,6 @@ static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev)
return val & LP872X_VOUT_M;
}
-static int lp8725_buck_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct lp872x *lp = rdev_get_drvdata(rdev);
- enum lp872x_regulator_id buck = rdev_get_id(rdev);
- int i;
- u8 addr;
-
- switch (buck) {
- case LP8725_ID_BUCK1:
- addr = LP8725_BUCK1_VOUT2;
- break;
- case LP8725_ID_BUCK2:
- addr = LP8725_BUCK2_VOUT2;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = ARRAY_SIZE(lp8725_buck_uA) - 1; i >= 0; i--) {
- if (lp8725_buck_uA[i] >= min_uA &&
- lp8725_buck_uA[i] <= max_uA)
- return lp872x_update_bits(lp, addr,
- LP8725_BUCK_CL_M,
- i << LP8725_BUCK_CL_S);
- }
-
- return -EINVAL;
-}
-
-static int lp8725_buck_get_current_limit(struct regulator_dev *rdev)
-{
- struct lp872x *lp = rdev_get_drvdata(rdev);
- enum lp872x_regulator_id buck = rdev_get_id(rdev);
- u8 addr, val;
- int ret;
-
- switch (buck) {
- case LP8725_ID_BUCK1:
- addr = LP8725_BUCK1_VOUT2;
- break;
- case LP8725_ID_BUCK2:
- addr = LP8725_BUCK2_VOUT2;
- break;
- default:
- return -EINVAL;
- }
-
- ret = lp872x_read_byte(lp, addr, &val);
- if (ret)
- return ret;
-
- val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S;
-
- return (val < ARRAY_SIZE(lp8725_buck_uA)) ?
- lp8725_buck_uA[val] : -EINVAL;
-}
-
static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct lp872x *lp = rdev_get_drvdata(rdev);
@@ -478,7 +420,7 @@ static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
-static struct regulator_ops lp872x_ldo_ops = {
+static const struct regulator_ops lp872x_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -489,7 +431,7 @@ static struct regulator_ops lp872x_ldo_ops = {
.enable_time = lp872x_regulator_enable_time,
};
-static struct regulator_ops lp8720_buck_ops = {
+static const struct regulator_ops lp8720_buck_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
@@ -502,7 +444,7 @@ static struct regulator_ops lp8720_buck_ops = {
.get_mode = lp872x_buck_get_mode,
};
-static struct regulator_ops lp8725_buck_ops = {
+static const struct regulator_ops lp8725_buck_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
@@ -513,11 +455,11 @@ static struct regulator_ops lp8725_buck_ops = {
.enable_time = lp872x_regulator_enable_time,
.set_mode = lp872x_buck_set_mode,
.get_mode = lp872x_buck_get_mode,
- .set_current_limit = lp8725_buck_set_current_limit,
- .get_current_limit = lp8725_buck_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
-static struct regulator_desc lp8720_regulator_desc[] = {
+static const struct regulator_desc lp8720_regulator_desc[] = {
{
.name = "ldo1",
.of_match = of_match_ptr("ldo1"),
@@ -602,7 +544,7 @@ static struct regulator_desc lp8720_regulator_desc[] = {
},
};
-static struct regulator_desc lp8725_regulator_desc[] = {
+static const struct regulator_desc lp8725_regulator_desc[] = {
{
.name = "ldo1",
.of_match = of_match_ptr("ldo1"),
@@ -712,6 +654,10 @@ static struct regulator_desc lp8725_regulator_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP872X_GENERAL_CFG,
.enable_mask = LP8725_BUCK1_EN_M,
+ .curr_table = lp8725_buck_uA,
+ .n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
+ .csel_reg = LP8725_BUCK1_VOUT2,
+ .csel_mask = LP8725_BUCK_CL_M,
},
{
.name = "buck2",
@@ -724,6 +670,10 @@ static struct regulator_desc lp8725_regulator_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP872X_GENERAL_CFG,
.enable_mask = LP8725_BUCK2_EN_M,
+ .curr_table = lp8725_buck_uA,
+ .n_current_limits = ARRAY_SIZE(lp8725_buck_uA),
+ .csel_reg = LP8725_BUCK2_VOUT2,
+ .csel_mask = LP8725_BUCK_CL_M,
},
};
@@ -820,7 +770,7 @@ static struct regulator_init_data
static int lp872x_regulator_register(struct lp872x *lp)
{
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
struct regulator_config cfg = { };
struct regulator_dev *rdev;
int i;
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index 70e3df653381..b55de293ca7a 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -39,6 +39,10 @@
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
+ .curr_table = lp873x_buck_uA, \
+ .n_current_limits = ARRAY_SIZE(lp873x_buck_uA), \
+ .csel_reg = (_cr), \
+ .csel_mask = LP873X_BUCK0_CTRL_2_BUCK0_ILIM,\
}, \
.ctrl2_reg = _cr, \
}
@@ -61,7 +65,7 @@ static const struct regulator_linear_range ldo0_ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
};
-static unsigned int lp873x_buck_ramp_delay[] = {
+static const unsigned int lp873x_buck_ramp_delay[] = {
30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};
@@ -108,45 +112,8 @@ static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
return 0;
}
-static int lp873x_buck_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- int id = rdev_get_id(rdev);
- struct lp873x *lp873 = rdev_get_drvdata(rdev);
- int i;
-
- for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) {
- if (lp873x_buck_uA[i] >= min_uA &&
- lp873x_buck_uA[i] <= max_uA)
- return regmap_update_bits(lp873->regmap,
- regulators[id].ctrl2_reg,
- LP873X_BUCK0_CTRL_2_BUCK0_ILIM,
- i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM));
- }
-
- return -EINVAL;
-}
-
-static int lp873x_buck_get_current_limit(struct regulator_dev *rdev)
-{
- int id = rdev_get_id(rdev);
- struct lp873x *lp873 = rdev_get_drvdata(rdev);
- int ret;
- unsigned int val;
-
- ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val);
- if (ret)
- return ret;
-
- val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >>
- __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM);
-
- return (val < ARRAY_SIZE(lp873x_buck_uA)) ?
- lp873x_buck_uA[val] : -EINVAL;
-}
-
/* Operations permitted on BUCK0, BUCK1 */
-static struct regulator_ops lp873x_buck01_ops = {
+static const struct regulator_ops lp873x_buck01_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -156,12 +123,12 @@ static struct regulator_ops lp873x_buck01_ops = {
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = lp873x_buck_set_ramp_delay,
- .set_current_limit = lp873x_buck_set_current_limit,
- .get_current_limit = lp873x_buck_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
/* Operations permitted on LDO0 and LDO1 */
-static struct regulator_ops lp873x_ldo01_ops = {
+static const struct regulator_ops lp873x_ldo01_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 244822bb63cd..14fd38807134 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -315,7 +315,7 @@ out_i2c_error:
.vsel_mask = LP8755_BUCK_VOUT_M,\
}
-static struct regulator_desc lp8755_regulators[] = {
+static const struct regulator_desc lp8755_regulators[] = {
lp8755_buck_desc(0),
lp8755_buck_desc(1),
lp8755_buck_desc(2),
@@ -386,7 +386,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
if (ret < 0)
goto err_i2c;
- /* send OCP event to all regualtor devices */
+ /* send OCP event to all regulator devices */
if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if (pchip->rdev[icnt] != NULL)
@@ -394,7 +394,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
LP8755_EVENT_OCP,
NULL);
- /* send OVP event to all regualtor devices */
+ /* send OVP event to all regulator devices */
if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if (pchip->rdev[icnt] != NULL)
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index c192357d1dea..4ed41731a5b1 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -51,7 +51,7 @@ static const struct regulator_linear_range buck0_1_2_3_ranges[] = {
REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
};
-static unsigned int lp87565_buck_ramp_delay[] = {
+static const unsigned int lp87565_buck_ramp_delay[] = {
30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};
@@ -140,7 +140,7 @@ static int lp87565_buck_get_current_limit(struct regulator_dev *rdev)
}
/* Operations permitted on BUCK0, BUCK1 */
-static struct regulator_ops lp87565_buck_ops = {
+static const struct regulator_ops lp87565_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index ec46290b647e..a7d30550bb5f 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -95,12 +95,10 @@ struct lp8788_buck {
void *dvs;
};
-/* BUCK 1 ~ 4 voltage table */
-static const int lp8788_buck_vtbl[] = {
- 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
- 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
- 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
- 1950000, 2000000,
+/* BUCK 1 ~ 4 voltage ranges */
+static const struct regulator_linear_range buck_volt_ranges[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0, 0),
+ REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000),
};
static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
@@ -345,8 +343,8 @@ static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev)
}
static const struct regulator_ops lp8788_buck12_ops = {
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = lp8788_buck12_set_voltage_sel,
.get_voltage_sel = lp8788_buck12_get_voltage_sel,
.enable = regulator_enable_regmap,
@@ -358,8 +356,8 @@ static const struct regulator_ops lp8788_buck12_ops = {
};
static const struct regulator_ops lp8788_buck34_ops = {
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
@@ -370,13 +368,14 @@ static const struct regulator_ops lp8788_buck34_ops = {
.get_mode = lp8788_buck_get_mode,
};
-static struct regulator_desc lp8788_buck_desc[] = {
+static const struct regulator_desc lp8788_buck_desc[] = {
{
.name = "buck1",
.id = BUCK1,
.ops = &lp8788_buck12_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_BUCK,
@@ -386,8 +385,9 @@ static struct regulator_desc lp8788_buck_desc[] = {
.name = "buck2",
.id = BUCK2,
.ops = &lp8788_buck12_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_BUCK,
@@ -397,8 +397,9 @@ static struct regulator_desc lp8788_buck_desc[] = {
.name = "buck3",
.id = BUCK3,
.ops = &lp8788_buck34_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP8788_BUCK3_VOUT,
@@ -410,8 +411,9 @@ static struct regulator_desc lp8788_buck_desc[] = {
.name = "buck4",
.id = BUCK4,
.ops = &lp8788_buck34_ops,
- .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
- .volt_table = lp8788_buck_vtbl,
+ .n_voltages = 26,
+ .linear_ranges = buck_volt_ranges,
+ .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.vsel_reg = LP8788_BUCK4_VOUT,
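As a sanity check on the lp8788-buck conversion from a voltage table to linear ranges, the two ranges reproduce the removed 26-entry table exactly:

/*
 * sel 0        -> 500000 uV                      REGULATOR_LINEAR_RANGE(500000, 0, 0, 0)
 * sel 1 .. 25  -> 800000 uV + (sel - 1) * 50000  REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000)
 *
 * e.g. sel 1 -> 800000, sel 11 -> 1300000, sel 25 -> 2000000,
 * matching lp8788_buck_vtbl[] entry for entry.
 */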
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 2ee22e7ea675..a2ef146e6b3a 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -186,7 +186,7 @@ static const struct regulator_ops lp8788_ldo_voltage_fixed_ops = {
.enable_time = lp8788_ldo_enable_time,
};
-static struct regulator_desc lp8788_dldo_desc[] = {
+static const struct regulator_desc lp8788_dldo_desc[] = {
{
.name = "dldo1",
.id = DLDO1,
@@ -343,7 +343,7 @@ static struct regulator_desc lp8788_dldo_desc[] = {
},
};
-static struct regulator_desc lp8788_aldo_desc[] = {
+static const struct regulator_desc lp8788_aldo_desc[] = {
{
.name = "aldo1",
.id = ALDO1,
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index 71fd0f2a4b76..e6d66e492b85 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -241,61 +241,10 @@ static struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = {
LTC3676_FIXED_REG(LDO4, ldo4, LDOB, 2),
};
-static bool ltc3676_writeable_reg(struct device *dev, unsigned int reg)
+static bool ltc3676_readable_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case LTC3676_IRQSTAT:
- case LTC3676_BUCK1:
- case LTC3676_BUCK2:
- case LTC3676_BUCK3:
- case LTC3676_BUCK4:
- case LTC3676_LDOA:
- case LTC3676_LDOB:
- case LTC3676_SQD1:
- case LTC3676_SQD2:
- case LTC3676_CNTRL:
- case LTC3676_DVB1A:
- case LTC3676_DVB1B:
- case LTC3676_DVB2A:
- case LTC3676_DVB2B:
- case LTC3676_DVB3A:
- case LTC3676_DVB3B:
- case LTC3676_DVB4A:
- case LTC3676_DVB4B:
- case LTC3676_MSKIRQ:
- case LTC3676_MSKPG:
- case LTC3676_USER:
- case LTC3676_HRST:
- case LTC3676_CLIRQ:
- return true;
- }
- return false;
-}
-
-static bool ltc3676_readable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case LTC3676_IRQSTAT:
- case LTC3676_BUCK1:
- case LTC3676_BUCK2:
- case LTC3676_BUCK3:
- case LTC3676_BUCK4:
- case LTC3676_LDOA:
- case LTC3676_LDOB:
- case LTC3676_SQD1:
- case LTC3676_SQD2:
- case LTC3676_CNTRL:
- case LTC3676_DVB1A:
- case LTC3676_DVB1B:
- case LTC3676_DVB2A:
- case LTC3676_DVB2B:
- case LTC3676_DVB3A:
- case LTC3676_DVB3B:
- case LTC3676_DVB4A:
- case LTC3676_DVB4B:
- case LTC3676_MSKIRQ:
- case LTC3676_MSKPG:
- case LTC3676_USER:
+ case LTC3676_BUCK1 ... LTC3676_IRQSTAT:
case LTC3676_HRST:
case LTC3676_CLIRQ:
return true;
@@ -306,9 +255,7 @@ static bool ltc3676_readable_reg(struct device *dev, unsigned int reg)
static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case LTC3676_IRQSTAT:
- case LTC3676_PGSTATL:
- case LTC3676_PGSTATRT:
+ case LTC3676_IRQSTAT ... LTC3676_PGSTATRT:
return true;
}
return false;
@@ -317,8 +264,8 @@ static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config ltc3676_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .writeable_reg = ltc3676_writeable_reg,
- .readable_reg = ltc3676_readable_reg,
+ .writeable_reg = ltc3676_readable_writeable_reg,
+ .readable_reg = ltc3676_readable_writeable_reg,
.volatile_reg = ltc3676_volatile_reg,
.max_register = LTC3676_CLIRQ,
.use_single_read = true,
@@ -442,5 +389,5 @@ static struct i2c_driver ltc3676_driver = {
module_i2c_driver(ltc3676_driver);
MODULE_AUTHOR("Tim Harvey <tharvey@gateworks.com>");
-MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC1376");
+MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3676");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index bc7f4751bf9c..85a88a9e4d42 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -324,4 +324,3 @@ module_exit(max14577_regulator_exit);
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index b94e3a721721..1607ac673e44 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -1,7 +1,7 @@
/*
* Maxim MAX77620 Regulator driver
*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
*
* Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
* Laxman Dewangan <ldewangan@nvidia.com>
@@ -690,6 +690,7 @@ static const struct regulator_ops max77620_regulator_ops = {
.active_discharge_mask = MAX77620_SD_CFG1_ADE_MASK, \
.active_discharge_reg = MAX77620_REG_##_id##_CFG, \
.type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}, \
}
@@ -721,6 +722,7 @@ static const struct regulator_ops max77620_regulator_ops = {
.active_discharge_mask = MAX77620_LDO_CFG2_ADE_MASK, \
.active_discharge_reg = MAX77620_REG_##_id##_CFG2, \
.type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}, \
}
@@ -803,6 +805,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
rdesc = &rinfo[id].desc;
pmic->rinfo[id] = &max77620_regs_info[id];
pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
+ pmic->reg_pdata[id].active_fps_src = -1;
+ pmic->reg_pdata[id].active_fps_pd_slot = -1;
+ pmic->reg_pdata[id].active_fps_pu_slot = -1;
+ pmic->reg_pdata[id].suspend_fps_src = -1;
+ pmic->reg_pdata[id].suspend_fps_pd_slot = -1;
+ pmic->reg_pdata[id].suspend_fps_pu_slot = -1;
+ pmic->reg_pdata[id].power_ok = -1;
+ pmic->reg_pdata[id].ramp_rate_setting = -1;
ret = max77620_read_slew_rate(pmic, id);
if (ret < 0)
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
new file mode 100644
index 000000000000..31ebf34b01ec
--- /dev/null
+++ b/drivers/regulator/max77650-regulator.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// Regulator driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/of.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define MAX77650_REGULATOR_EN_CTRL_MASK GENMASK(3, 0)
+#define MAX77650_REGULATOR_EN_CTRL_BITS(_reg) \
+ ((_reg) & MAX77650_REGULATOR_EN_CTRL_MASK)
+#define MAX77650_REGULATOR_ENABLED GENMASK(2, 1)
+#define MAX77650_REGULATOR_DISABLED BIT(2)
+
+#define MAX77650_REGULATOR_V_LDO_MASK GENMASK(6, 0)
+#define MAX77650_REGULATOR_V_SBB_MASK GENMASK(5, 0)
+
+#define MAX77650_REGULATOR_AD_MASK BIT(3)
+#define MAX77650_REGULATOR_AD_DISABLED 0x00
+#define MAX77650_REGULATOR_AD_ENABLED BIT(3)
+
+#define MAX77650_REGULATOR_CURR_LIM_MASK GENMASK(7, 6)
+
+enum {
+ MAX77650_REGULATOR_ID_LDO = 0,
+ MAX77650_REGULATOR_ID_SBB0,
+ MAX77650_REGULATOR_ID_SBB1,
+ MAX77650_REGULATOR_ID_SBB2,
+ MAX77650_REGULATOR_NUM_REGULATORS,
+};
+
+struct max77650_regulator_desc {
+ struct regulator_desc desc;
+ unsigned int regA;
+ unsigned int regB;
+};
+
+static const u32 max77651_sbb1_regulator_volt_table[] = {
+ 2400000, 3200000, 4000000, 4800000,
+ 2450000, 3250000, 4050000, 4850000,
+ 2500000, 3300000, 4100000, 4900000,
+ 2550000, 3350000, 4150000, 4950000,
+ 2600000, 3400000, 4200000, 5000000,
+ 2650000, 3450000, 4250000, 5050000,
+ 2700000, 3500000, 4300000, 5100000,
+ 2750000, 3550000, 4350000, 5150000,
+ 2800000, 3600000, 4400000, 5200000,
+ 2850000, 3650000, 4450000, 5250000,
+ 2900000, 3700000, 4500000, 0,
+ 2950000, 3750000, 4550000, 0,
+ 3000000, 3800000, 4600000, 0,
+ 3050000, 3850000, 4650000, 0,
+ 3100000, 3900000, 4700000, 0,
+ 3150000, 3950000, 4750000, 0,
+};
+
+#define MAX77651_REGULATOR_SBB1_SEL_DEC(_val) \
+ (((_val & 0x3c) >> 2) | ((_val & 0x03) << 4))
+#define MAX77651_REGULATOR_SBB1_SEL_ENC(_val) \
+ (((_val & 0x30) >> 4) | ((_val & 0x0f) << 2))
+
+#define MAX77650_REGULATOR_SBB1_SEL_DECR(_val) \
+ do { \
+ _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \
+ _val--; \
+ _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \
+ } while (0)
+
+#define MAX77650_REGULATOR_SBB1_SEL_INCR(_val) \
+ do { \
+ _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \
+ _val++; \
+ _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \
+ } while (0)
+
+static const unsigned int max77650_current_limit_table[] = {
+ 1000000, 866000, 707000, 500000,
+};
+
+static int max77650_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct max77650_regulator_desc *rdesc;
+ struct regmap *map;
+ int val, rv, en;
+
+ rdesc = rdev_get_drvdata(rdev);
+ map = rdev_get_regmap(rdev);
+
+ rv = regmap_read(map, rdesc->regB, &val);
+ if (rv)
+ return rv;
+
+ en = MAX77650_REGULATOR_EN_CTRL_BITS(val);
+
+ return en != MAX77650_REGULATOR_DISABLED;
+}
+
+static int max77650_regulator_enable(struct regulator_dev *rdev)
+{
+ struct max77650_regulator_desc *rdesc;
+ struct regmap *map;
+
+ rdesc = rdev_get_drvdata(rdev);
+ map = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(map, rdesc->regB,
+ MAX77650_REGULATOR_EN_CTRL_MASK,
+ MAX77650_REGULATOR_ENABLED);
+}
+
+static int max77650_regulator_disable(struct regulator_dev *rdev)
+{
+ struct max77650_regulator_desc *rdesc;
+ struct regmap *map;
+
+ rdesc = rdev_get_drvdata(rdev);
+ map = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(map, rdesc->regB,
+ MAX77650_REGULATOR_EN_CTRL_MASK,
+ MAX77650_REGULATOR_DISABLED);
+}
+
+static int max77650_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int rv = 0, curr, diff;
+ bool ascending;
+
+ /*
+ * If the regulator is disabled, we can program the desired
+ * voltage right away.
+ */
+ if (!max77650_regulator_is_enabled(rdev))
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+
+ /*
+ * Otherwise we need to manually ramp the output voltage up/down
+ * one step at a time.
+ */
+
+ curr = regulator_get_voltage_sel_regmap(rdev);
+ if (curr < 0)
+ return curr;
+
+ diff = curr - sel;
+ if (diff == 0)
+ return 0; /* Already there. */
+ else if (diff > 0)
+ ascending = false;
+ else
+ ascending = true;
+
+ /*
+ * Make sure we'll get to the right voltage and break the loop even if
+ * the selector equals 0.
+ */
+ for (ascending ? curr++ : curr--;; ascending ? curr++ : curr--) {
+ rv = regulator_set_voltage_sel_regmap(rdev, curr);
+ if (rv)
+ return rv;
+
+ if (curr == sel)
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Special case: non-linear voltage table for max77651 SBB1 - software
+ * must ensure the voltage is ramped in 50mV increments.
+ */
+static int max77651_regulator_sbb1_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ int rv = 0, curr, vcurr, vdest, vdiff;
+
+ /*
+ * If the regulator is disabled, we can program the desired
+ * voltage right away.
+ */
+ if (!max77650_regulator_is_enabled(rdev))
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+
+ curr = regulator_get_voltage_sel_regmap(rdev);
+ if (curr < 0)
+ return curr;
+
+ if (curr == sel)
+ return 0; /* Already there. */
+
+ vcurr = max77651_sbb1_regulator_volt_table[curr];
+ vdest = max77651_sbb1_regulator_volt_table[sel];
+ vdiff = vcurr - vdest;
+
+ for (;;) {
+ if (vdiff > 0)
+ MAX77650_REGULATOR_SBB1_SEL_DECR(curr);
+ else
+ MAX77650_REGULATOR_SBB1_SEL_INCR(curr);
+
+ rv = regulator_set_voltage_sel_regmap(rdev, curr);
+ if (rv)
+ return rv;
+
+ if (curr == sel)
+ break;
+ };
+
+ return 0;
+}
+
+static const struct regulator_ops max77650_regulator_LDO_ops = {
+ .is_enabled = max77650_regulator_is_enabled,
+ .enable = max77650_regulator_enable,
+ .disable = max77650_regulator_disable,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = max77650_regulator_set_voltage_sel,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops max77650_regulator_SBB_ops = {
+ .is_enabled = max77650_regulator_is_enabled,
+ .enable = max77650_regulator_enable,
+ .disable = max77650_regulator_disable,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = max77650_regulator_set_voltage_sel,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+/* Special case for max77651 SBB1 - non-linear voltage mapping. */
+static const struct regulator_ops max77651_SBB1_regulator_ops = {
+ .is_enabled = max77650_regulator_is_enabled,
+ .enable = max77650_regulator_enable,
+ .disable = max77650_regulator_disable,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = max77651_regulator_sbb1_set_voltage_sel,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static struct max77650_regulator_desc max77650_LDO_desc = {
+ .desc = {
+ .name = "ldo",
+ .of_match = of_match_ptr("ldo"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-ldo",
+ .id = MAX77650_REGULATOR_ID_LDO,
+ .ops = &max77650_regulator_LDO_ops,
+ .min_uV = 1350000,
+ .uV_step = 12500,
+ .n_voltages = 128,
+ .vsel_mask = MAX77650_REGULATOR_V_LDO_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_LDO_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_LDO_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ .regA = MAX77650_REG_CNFG_LDO_A,
+ .regB = MAX77650_REG_CNFG_LDO_B,
+};
+
+static struct max77650_regulator_desc max77650_SBB0_desc = {
+ .desc = {
+ .name = "sbb0",
+ .of_match = of_match_ptr("sbb0"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb0",
+ .id = MAX77650_REGULATOR_ID_SBB0,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 800000,
+ .uV_step = 25000,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB0_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB0_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB0_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB0_A,
+ .regB = MAX77650_REG_CNFG_SBB0_B,
+};
+
+static struct max77650_regulator_desc max77650_SBB1_desc = {
+ .desc = {
+ .name = "sbb1",
+ .of_match = of_match_ptr("sbb1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb1",
+ .id = MAX77650_REGULATOR_ID_SBB1,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 800000,
+ .uV_step = 12500,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB1_A,
+ .regB = MAX77650_REG_CNFG_SBB1_B,
+};
+
+static struct max77650_regulator_desc max77651_SBB1_desc = {
+ .desc = {
+ .name = "sbb1",
+ .of_match = of_match_ptr("sbb1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb1",
+ .id = MAX77650_REGULATOR_ID_SBB1,
+ .ops = &max77651_SBB1_regulator_ops,
+ .volt_table = max77651_sbb1_regulator_volt_table,
+ .n_voltages = ARRAY_SIZE(max77651_sbb1_regulator_volt_table),
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB1_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB1_A,
+ .regB = MAX77650_REG_CNFG_SBB1_B,
+};
+
+static struct max77650_regulator_desc max77650_SBB2_desc = {
+ .desc = {
+ .name = "sbb2",
+ .of_match = of_match_ptr("sbb2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb0",
+ .id = MAX77650_REGULATOR_ID_SBB2,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 800000,
+ .uV_step = 50000,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB2_A,
+ .regB = MAX77650_REG_CNFG_SBB2_B,
+};
+
+static struct max77650_regulator_desc max77651_SBB2_desc = {
+ .desc = {
+ .name = "sbb2",
+ .of_match = of_match_ptr("sbb2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .supply_name = "in-sbb0",
+ .id = MAX77650_REGULATOR_ID_SBB2,
+ .ops = &max77650_regulator_SBB_ops,
+ .min_uV = 2400000,
+ .uV_step = 50000,
+ .n_voltages = 64,
+ .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK,
+ .vsel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED,
+ .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED,
+ .active_discharge_mask = MAX77650_REGULATOR_AD_MASK,
+ .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B,
+ .enable_time = 100,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .csel_reg = MAX77650_REG_CNFG_SBB2_A,
+ .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK,
+ .curr_table = max77650_current_limit_table,
+ .n_current_limits = ARRAY_SIZE(max77650_current_limit_table),
+ },
+ .regA = MAX77650_REG_CNFG_SBB2_A,
+ .regB = MAX77650_REG_CNFG_SBB2_B,
+};
+
+static int max77650_regulator_probe(struct platform_device *pdev)
+{
+ struct max77650_regulator_desc **rdescs;
+ struct max77650_regulator_desc *rdesc;
+ struct regulator_config config = { };
+ struct device *dev, *parent;
+ struct regulator_dev *rdev;
+ struct regmap *map;
+ unsigned int val;
+ int i, rv;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+
+ if (!dev->of_node)
+ dev->of_node = parent->of_node;
+
+ rdescs = devm_kcalloc(dev, MAX77650_REGULATOR_NUM_REGULATORS,
+ sizeof(*rdescs), GFP_KERNEL);
+ if (!rdescs)
+ return -ENOMEM;
+
+ map = dev_get_regmap(parent, NULL);
+ if (!map)
+ return -ENODEV;
+
+ rv = regmap_read(map, MAX77650_REG_CID, &val);
+ if (rv)
+ return rv;
+
+ rdescs[MAX77650_REGULATOR_ID_LDO] = &max77650_LDO_desc;
+ rdescs[MAX77650_REGULATOR_ID_SBB0] = &max77650_SBB0_desc;
+
+ switch (MAX77650_CID_BITS(val)) {
+ case MAX77650_CID_77650A:
+ case MAX77650_CID_77650C:
+ rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77650_SBB1_desc;
+ rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77650_SBB2_desc;
+ break;
+ case MAX77650_CID_77651A:
+ case MAX77650_CID_77651B:
+ rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77651_SBB1_desc;
+ rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77651_SBB2_desc;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ config.dev = parent;
+
+ for (i = 0; i < MAX77650_REGULATOR_NUM_REGULATORS; i++) {
+ rdesc = rdescs[i];
+ config.driver_data = rdesc;
+
+ rdev = devm_regulator_register(dev, &rdesc->desc, &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver max77650_regulator_driver = {
+ .driver = {
+ .name = "max77650-regulator",
+ },
+ .probe = max77650_regulator_probe,
+};
+module_platform_driver(max77650_regulator_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 regulator driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index c30cf5c9f2de..ea7b50397300 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -248,9 +248,9 @@ static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev,
unsigned int ramp_value;
if (id > MAX77802_BUCK4) {
- dev_warn(&rdev->dev,
- "%s: regulator: ramp delay not supported\n",
- rdev->desc->name);
+ dev_warn(&rdev->dev,
+ "%s: regulator: ramp delay not supported\n",
+ rdev->desc->name);
return -EINVAL;
}
ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit,
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 8fd1adc9c9a9..ab558b26cd7c 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -226,69 +226,69 @@ static const unsigned int mc13783_pwgtdrv_val[] = {
5500000,
};
-static struct regulator_ops mc13783_gpo_regulator_ops;
+static const struct regulator_ops mc13783_gpo_regulator_ops;
-#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages) \
- MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \
+#define MC13783_DEFINE(prefix, name, node, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13783_REG_, name, node, reg, vsel_reg, voltages, \
mc13xxx_regulator_ops)
-#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages) \
- MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \
+#define MC13783_FIXED_DEFINE(prefix, name, node, reg, voltages) \
+ MC13xxx_FIXED_DEFINE(MC13783_REG_, name, node, reg, voltages, \
mc13xxx_fixed_regulator_ops)
-#define MC13783_GPO_DEFINE(prefix, name, reg, voltages) \
- MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \
+#define MC13783_GPO_DEFINE(prefix, name, node, reg, voltages) \
+ MC13xxx_GPO_DEFINE(MC13783_REG_, name, node, reg, voltages, \
mc13783_gpo_regulator_ops)
-#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
- MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
-#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
- MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages)
static struct mc13xxx_regulator mc13783_regulators[] = {
- MC13783_DEFINE_SW(SW1A, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val),
- MC13783_DEFINE_SW(SW1B, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val),
- MC13783_DEFINE_SW(SW2A, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val),
- MC13783_DEFINE_SW(SW2B, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val),
- MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
-
- MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
- MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_SW(SW1A, sw1a, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val),
+ MC13783_DEFINE_SW(SW1B, sw1b, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val),
+ MC13783_DEFINE_SW(SW2A, sw2a, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val),
+ MC13783_DEFINE_SW(SW2B, sw2b, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val),
+ MC13783_DEFINE_SW(SW3, sw3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
+
+ MC13783_FIXED_DEFINE(REG, VAUDIO, vaudio, REGULATORMODE0, mc13783_vaudio_val),
+ MC13783_FIXED_DEFINE(REG, VIOHI, viohi, REGULATORMODE0, mc13783_viohi_val),
+ MC13783_DEFINE_REGU(VIOLO, violo, REGULATORMODE0, REGULATORSETTING0,
mc13783_violo_val),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0,
mc13783_vdig_val),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VGEN, vgen, REGULATORMODE0, REGULATORSETTING0,
mc13783_vgen_val),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VRFDIG, vrfdig, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfdig_val),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VRFREF, vrfref, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfref_val),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VRFCP, vrfcp, REGULATORMODE0, REGULATORSETTING0,
mc13783_vrfcp_val),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VSIM, vsim, REGULATORMODE1, REGULATORSETTING0,
mc13783_vsim_val),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VESIM, vesim, REGULATORMODE1, REGULATORSETTING0,
mc13783_vesim_val),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
+ MC13783_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0,
mc13783_vcam_val),
- MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_FIXED_DEFINE(REG, VRFBG, vrfbg, REGULATORMODE1, mc13783_vrfbg_val),
+ MC13783_DEFINE_REGU(VVIB, vvib, REGULATORMODE1, REGULATORSETTING1,
mc13783_vvib_val),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VRF1, vrf1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VRF2, vrf2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vrf_val),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VMMC1, vmmc1, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,
+ MC13783_DEFINE_REGU(VMMC2, vmmc2, REGULATORMODE1, REGULATORSETTING1,
mc13783_vmmc_val),
- MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val),
- MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
- MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REG, GPO1, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO2, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO3, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, GPO4, gpo1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REG, PWGT1SPI, pwgt1spi, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REG, PWGT2SPI, pwgt2spi, POWERMISC, mc13783_pwgtdrv_val),
};
static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
@@ -380,7 +380,7 @@ static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
-static struct regulator_ops mc13783_gpo_regulator_ops = {
+static const struct regulator_ops mc13783_gpo_regulator_ops = {
.enable = mc13783_gpo_regulator_enable,
.disable = mc13783_gpo_regulator_disable,
.is_enabled = mc13783_gpo_regulator_is_enabled,
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index f3fba1cc1379..a731e826a037 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -242,61 +242,61 @@ static const unsigned int mc13892_pwgtdrv[] = {
5000000,
};
-static struct regulator_ops mc13892_gpo_regulator_ops;
-static struct regulator_ops mc13892_sw_regulator_ops;
+static const struct regulator_ops mc13892_gpo_regulator_ops;
+static const struct regulator_ops mc13892_sw_regulator_ops;
-#define MC13892_FIXED_DEFINE(name, reg, voltages) \
- MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \
+#define MC13892_FIXED_DEFINE(name, node, reg, voltages) \
+ MC13xxx_FIXED_DEFINE(MC13892_, name, node, reg, voltages, \
mc13xxx_fixed_regulator_ops)
-#define MC13892_GPO_DEFINE(name, reg, voltages) \
- MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \
+#define MC13892_GPO_DEFINE(name, node, reg, voltages) \
+ MC13xxx_GPO_DEFINE(MC13892_, name, node, reg, voltages, \
mc13892_gpo_regulator_ops)
-#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \
- MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+#define MC13892_SW_DEFINE(name, node, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
mc13892_sw_regulator_ops)
-#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \
- MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+#define MC13892_DEFINE_REGU(name, node, reg, vsel_reg, voltages) \
+ MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \
mc13xxx_regulator_ops)
static struct mc13xxx_regulator mc13892_regulators[] = {
- MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell),
- MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
- MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw),
- MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw),
- MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
- MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
- MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
- MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VCOINCELL, vcoincell, POWERCTL0, POWERCTL0, mc13892_vcoincell),
+ MC13892_SW_DEFINE(SW1, sw1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
+ MC13892_SW_DEFINE(SW2, sw2, SWITCHERS1, SWITCHERS1, mc13892_sw),
+ MC13892_SW_DEFINE(SW3, sw3, SWITCHERS2, SWITCHERS2, mc13892_sw),
+ MC13892_SW_DEFINE(SW4, sw4, SWITCHERS3, SWITCHERS3, mc13892_sw),
+ MC13892_FIXED_DEFINE(SWBST, swbst, SWITCHERS5, mc13892_swbst),
+ MC13892_FIXED_DEFINE(VIOHI, viohi, REGULATORMODE0, mc13892_viohi),
+ MC13892_DEFINE_REGU(VPLL, vpll, REGULATORMODE0, REGULATORSETTING0,
mc13892_vpll),
- MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0,
mc13892_vdig),
- MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,
+ MC13892_DEFINE_REGU(VSD, vsd, REGULATORMODE1, REGULATORSETTING1,
mc13892_vsd),
- MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VUSB2, vusb2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vusb2),
- MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,
+ MC13892_DEFINE_REGU(VVIDEO, vvideo, REGULATORMODE1, REGULATORSETTING1,
mc13892_vvideo),
- MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,
+ MC13892_DEFINE_REGU(VAUDIO, vaudio, REGULATORMODE1, REGULATORSETTING1,
mc13892_vaudio),
- MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0,
mc13892_vcam),
- MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VGEN1, vgen1, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen1),
- MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VGEN2, vgen2, REGULATORMODE0, REGULATORSETTING0,
mc13892_vgen2),
- MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,
+ MC13892_DEFINE_REGU(VGEN3, vgen3, REGULATORMODE1, REGULATORSETTING0,
mc13892_vgen3),
- MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
- MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo),
- MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv),
- MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv),
+ MC13892_FIXED_DEFINE(VUSB, vusb, USB1, mc13892_vusb),
+ MC13892_GPO_DEFINE(GPO1, gpo1, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO2, gpo2, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO3, gpo3, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(GPO4, gpo4, POWERMISC, mc13892_gpo),
+ MC13892_GPO_DEFINE(PWGT1SPI, pwgt1spi, POWERMISC, mc13892_pwgtdrv),
+ MC13892_GPO_DEFINE(PWGT2SPI, pwgt2spi, POWERMISC, mc13892_pwgtdrv),
};
static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
@@ -387,7 +387,7 @@ static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
}
-static struct regulator_ops mc13892_gpo_regulator_ops = {
+static const struct regulator_ops mc13892_gpo_regulator_ops = {
.enable = mc13892_gpo_regulator_enable,
.disable = mc13892_gpo_regulator_disable,
.is_enabled = mc13892_gpo_regulator_is_enabled,
@@ -479,7 +479,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static struct regulator_ops mc13892_sw_regulator_ops = {
+static const struct regulator_ops mc13892_sw_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 2243138d8a58..8ff19150ca92 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -99,7 +99,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
return rdev->desc->volt_table[val];
}
-struct regulator_ops mc13xxx_regulator_ops = {
+const struct regulator_ops mc13xxx_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
@@ -127,7 +127,7 @@ int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
-struct regulator_ops mc13xxx_fixed_regulator_ops = {
+const struct regulator_ops mc13xxx_fixed_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 2ab9bfd93b4e..ba7eff1070bd 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -53,13 +53,13 @@ static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
}
#endif
-extern struct regulator_ops mc13xxx_regulator_ops;
-extern struct regulator_ops mc13xxx_fixed_regulator_ops;
+extern const struct regulator_ops mc13xxx_regulator_ops;
+extern const struct regulator_ops mc13xxx_fixed_regulator_ops;
-#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \
+#define MC13xxx_DEFINE(prefix, _name, _node, _reg, _vsel_reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #_name, \
+ .name = #_node, \
.n_voltages = ARRAY_SIZE(_voltages), \
.volt_table = _voltages, \
.ops = &_ops, \
@@ -74,10 +74,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
}
-#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
+#define MC13xxx_FIXED_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #_name, \
+ .name = #_node, \
.n_voltages = ARRAY_SIZE(_voltages), \
.volt_table = _voltages, \
.ops = &_ops, \
@@ -89,10 +89,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
}
-#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
+#define MC13xxx_GPO_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #_name, \
+ .name = #_node, \
.n_voltages = ARRAY_SIZE(_voltages), \
.volt_table = _voltages, \
.ops = &_ops, \
@@ -104,9 +104,9 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
}
-#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \
- MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops)
-#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops) \
- MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages, ops) \
+ MC13xxx_DEFINE(SW, _name, _node, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages, ops) \
+ MC13xxx_DEFINE(REGU, _name, _node, _reg, _vsel_reg, _voltages, ops)
#endif
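The macro rework above only changes which argument feeds the stringified .name: #_node now produces the regulator name, so a call such as MC13892_SW_DEFINE(SW1, sw1, ...) registers the regulator as "sw1" rather than "SW1", which lines up with the lowercase node names typically used in device trees. A trimmed illustration of the stringification (hypothetical macro, not the driver's):

#define EXAMPLE_DEFINE(_name, _node)	{ .name = #_node }
/* EXAMPLE_DEFINE(SW1, sw1) expands to { .name = "sw1" } */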
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 3479ae009b0b..3a8004abe044 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/suspend.h>
+#include <linux/gpio/consumer.h>
#define VDD_LOW_SEL 0x0D
#define VDD_HIGH_SEL 0x3F
@@ -546,7 +547,6 @@ static struct i2c_driver mcp16502_drv = {
module_i2c_driver(mcp16502_drv);
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MCP16502 PMIC driver");
MODULE_AUTHOR("Andrei Stefanescu andrei.stefanescu@microchip.com");
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index 0495716fd35f..01d69f43d2b0 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -38,13 +38,9 @@ static const struct regmap_config mt6311_regmap_config = {
#define MT6311_MAX_UV 1393750
#define MT6311_STEP_UV 6250
-static const struct regulator_linear_range buck_volt_range[] = {
- REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV),
-};
-
static const struct regulator_ops mt6311_buck_ops = {
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
@@ -71,8 +67,6 @@ static const struct regulator_ops mt6311_ldo_ops = {
.min_uV = MT6311_MIN_UV,\
.uV_step = MT6311_STEP_UV,\
.owner = THIS_MODULE,\
- .linear_ranges = buck_volt_range, \
- .n_linear_ranges = ARRAY_SIZE(buck_volt_range), \
.enable_reg = MT6311_VDVFS11_CON9,\
.enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\
.vsel_reg = MT6311_VDVFS11_CON12,\
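Dropping the single-entry buck_volt_range[] above is a no-op for consumers, because one REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV) describes exactly the mapping the plain linear helpers compute from .min_uV and .uV_step. Assuming MT6311_MAX_UV corresponds to the top selector 0x7f, the minimum (not visible in this hunk) works out to 1393750 - 127 * 6250 = 600000 uV. A sketch of that equivalence, with the derived value marked as such:

#define MT6311_STEP_UV	6250
#define MT6311_MAX_UV	1393750
/* Derived here, not quoted from the driver: min = max - 0x7f * step. */
#define MT6311_MIN_UV_GUESS	(MT6311_MAX_UV - 0x7f * MT6311_STEP_UV)	/* 600000 */

/* What regulator_list_voltage_linear() returns for a given selector. */
static int example_list_voltage(unsigned int sel)
{
	return MT6311_MIN_UV_GUESS + sel * MT6311_STEP_UV;	/* sel 0 .. 0x7f */
}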
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index ffa5fc3724e4..7b6bf3536271 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -255,7 +255,7 @@ static void of_get_regulation_constraints(struct device_node *np,
* @desc: regulator description
*
* Populates regulator_init_data structure by extracting data from device
- * tree node, returns a pointer to the populated struture or NULL if memory
+ * tree node, returns a pointer to the populated structure or NULL if memory
* alloc fails.
*/
struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
@@ -547,7 +547,7 @@ bool of_check_coupling_data(struct regulator_dev *rdev)
NULL);
if (c_n_phandles != n_phandles) {
- dev_err(&rdev->dev, "number of couped reg phandles mismatch\n");
+ dev_err(&rdev->dev, "number of coupled reg phandles mismatch\n");
ret = false;
goto clean;
}
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c2cc392a27d4..7fb9e8dd834e 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -382,7 +382,7 @@ static struct palmas_sleep_requestor_info tps65917_sleep_req_info[] = {
EXTERNAL_REQUESTOR_TPS65917(LDO5, 2, 4),
};
-static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
+static const unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
#define SMPS_CTRL_MODE_OFF 0x00
#define SMPS_CTRL_MODE_ON 0x01
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index a9446056435f..1600f9821891 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -53,10 +53,6 @@ enum {
struct pv88060_regulator {
struct regulator_desc desc;
- /* Current limiting */
- unsigned n_current_limits;
- const int *current_limits;
- unsigned int limit_mask;
unsigned int conf; /* buck configuration register */
};
@@ -75,7 +71,7 @@ static const struct regmap_config pv88060_regmap_config = {
* Entry indexes corresponds to register values.
*/
-static const int pv88060_buck1_limits[] = {
+static const unsigned int pv88060_buck1_limits[] = {
1496000, 2393000, 3291000, 4189000
};
@@ -128,40 +124,6 @@ static int pv88060_buck_set_mode(struct regulator_dev *rdev,
PV88060_BUCK_MODE_MASK, val);
}
-static int pv88060_set_current_limit(struct regulator_dev *rdev, int min,
- int max)
-{
- struct pv88060_regulator *info = rdev_get_drvdata(rdev);
- int i;
-
- /* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
- if (min <= info->current_limits[i]
- && max >= info->current_limits[i]) {
- return regmap_update_bits(rdev->regmap,
- info->conf,
- info->limit_mask,
- i << PV88060_BUCK_ILIM_SHIFT);
- }
- }
-
- return -EINVAL;
-}
-
-static int pv88060_get_current_limit(struct regulator_dev *rdev)
-{
- struct pv88060_regulator *info = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
-
- ret = regmap_read(rdev->regmap, info->conf, &data);
- if (ret < 0)
- return ret;
-
- data = (data & info->limit_mask) >> PV88060_BUCK_ILIM_SHIFT;
- return info->current_limits[data];
-}
-
static const struct regulator_ops pv88060_buck_ops = {
.get_mode = pv88060_buck_get_mode,
.set_mode = pv88060_buck_set_mode,
@@ -171,8 +133,8 @@ static const struct regulator_ops pv88060_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = pv88060_set_current_limit,
- .get_current_limit = pv88060_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_ops pv88060_ldo_ops = {
@@ -184,6 +146,12 @@ static const struct regulator_ops pv88060_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
};
+static const struct regulator_ops pv88060_sw_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
#define PV88060_BUCK(chip, regl_name, min, step, max, limits_array) \
{\
.desc = {\
@@ -201,10 +169,11 @@ static const struct regulator_ops pv88060_ldo_ops = {
.enable_mask = PV88060_BUCK_EN, \
.vsel_reg = PV88060_REG_##regl_name##_CONF0,\
.vsel_mask = PV88060_VBUCK_MASK,\
+ .curr_table = limits_array,\
+ .n_current_limits = ARRAY_SIZE(limits_array),\
+ .csel_reg = PV88060_REG_##regl_name##_CONF1,\
+ .csel_mask = PV88060_BUCK_ILIM_MASK,\
},\
- .current_limits = limits_array,\
- .n_current_limits = ARRAY_SIZE(limits_array),\
- .limit_mask = PV88060_BUCK_ILIM_MASK, \
.conf = PV88060_REG_##regl_name##_CONF1,\
}
@@ -237,9 +206,8 @@ static const struct regulator_ops pv88060_ldo_ops = {
.regulators_node = of_match_ptr("regulators"),\
.type = REGULATOR_VOLTAGE,\
.owner = THIS_MODULE,\
- .ops = &pv88060_ldo_ops,\
- .min_uV = max,\
- .uV_step = 0,\
+ .ops = &pv88060_sw_ops,\
+ .fixed_uV = max,\
.n_voltages = 1,\
.enable_reg = PV88060_REG_##regl_name##_CONF,\
.enable_mask = PV88060_SW_EN,\
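The pv88060 conversion above (and the pv88080/pv88090 hunks that follow) can delete the open-coded callbacks because the core helpers regulator_get_current_limit_regmap() and regulator_set_current_limit_regmap() work entirely from descriptor fields: curr_table[] holds the limits in microamps indexed by the register field value, n_current_limits gives its length, and csel_reg/csel_mask locate that field. A stripped-down descriptor sketch showing only what those helpers consume (the register address and mask below are placeholders, not the driver's values):

#include <linux/regulator/driver.h>

static const unsigned int example_limits_uA[] = {
	1496000, 2393000, 3291000, 4189000,	/* index == value of the ILIM field */
};

static const struct regulator_desc example_buck_desc = {
	.name			= "example-buck",
	.ops			= &pv88060_buck_ops,	/* now uses the regmap helpers */
	.curr_table		= example_limits_uA,
	.n_current_limits	= ARRAY_SIZE(example_limits_uA),
	.csel_reg		= 0x1b,			/* placeholder CONF1 address */
	.csel_mask		= 0xc0,			/* placeholder ILIM mask     */
};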
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index 9a08cb2de501..bdddacdbeb99 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -45,12 +45,7 @@ enum pv88080_types {
struct pv88080_regulator {
struct regulator_desc desc;
- /* Current limiting */
- unsigned int n_current_limits;
- const int *current_limits;
- unsigned int limit_mask;
unsigned int mode_reg;
- unsigned int limit_reg;
unsigned int conf2;
unsigned int conf5;
};
@@ -102,11 +97,11 @@ static const struct regmap_config pv88080_regmap_config = {
* Entry indexes corresponds to register values.
*/
-static const int pv88080_buck1_limits[] = {
+static const unsigned int pv88080_buck1_limits[] = {
3230000, 5130000, 6960000, 8790000
};
-static const int pv88080_buck23_limits[] = {
+static const unsigned int pv88080_buck23_limits[] = {
1496000, 2393000, 3291000, 4189000
};
@@ -272,40 +267,6 @@ static int pv88080_buck_set_mode(struct regulator_dev *rdev,
PV88080_BUCK1_MODE_MASK, val);
}
-static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
- int max)
-{
- struct pv88080_regulator *info = rdev_get_drvdata(rdev);
- int i;
-
- /* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
- if (min <= info->current_limits[i]
- && max >= info->current_limits[i]) {
- return regmap_update_bits(rdev->regmap,
- info->limit_reg,
- info->limit_mask,
- i << PV88080_BUCK1_ILIM_SHIFT);
- }
- }
-
- return -EINVAL;
-}
-
-static int pv88080_get_current_limit(struct regulator_dev *rdev)
-{
- struct pv88080_regulator *info = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
-
- ret = regmap_read(rdev->regmap, info->limit_reg, &data);
- if (ret < 0)
- return ret;
-
- data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT;
- return info->current_limits[data];
-}
-
static const struct regulator_ops pv88080_buck_ops = {
.get_mode = pv88080_buck_get_mode,
.set_mode = pv88080_buck_set_mode,
@@ -315,8 +276,8 @@ static const struct regulator_ops pv88080_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = pv88080_set_current_limit,
- .get_current_limit = pv88080_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_ops pv88080_hvbuck_ops = {
@@ -341,9 +302,9 @@ static const struct regulator_ops pv88080_hvbuck_ops = {
.min_uV = min, \
.uV_step = step, \
.n_voltages = ((max) - (min))/(step) + 1, \
+ .curr_table = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array), \
},\
- .current_limits = limits_array, \
- .n_current_limits = ARRAY_SIZE(limits_array), \
}
#define PV88080_HVBUCK(chip, regl_name, min, step, max) \
@@ -521,9 +482,9 @@ static int pv88080_i2c_probe(struct i2c_client *i2c,
if (init_data)
config.init_data = &init_data[i];
- pv88080_regulator_info[i].limit_reg
+ pv88080_regulator_info[i].desc.csel_reg
= regmap_config->buck_regmap[i].buck_limit_reg;
- pv88080_regulator_info[i].limit_mask
+ pv88080_regulator_info[i].desc.csel_mask
= regmap_config->buck_regmap[i].buck_limit_mask;
pv88080_regulator_info[i].mode_reg
= regmap_config->buck_regmap[i].buck_mode_reg;
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 7a0c15957bd0..6e97cc6df2ee 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -42,10 +42,6 @@ enum {
struct pv88090_regulator {
struct regulator_desc desc;
- /* Current limiting */
- unsigned int n_current_limits;
- const int *current_limits;
- unsigned int limit_mask;
unsigned int conf;
unsigned int conf2;
};
@@ -71,14 +67,14 @@ static const struct regmap_config pv88090_regmap_config = {
* Entry indexes corresponds to register values.
*/
-static const int pv88090_buck1_limits[] = {
+static const unsigned int pv88090_buck1_limits[] = {
220000, 440000, 660000, 880000, 1100000, 1320000, 1540000, 1760000,
1980000, 2200000, 2420000, 2640000, 2860000, 3080000, 3300000, 3520000,
3740000, 3960000, 4180000, 4400000, 4620000, 4840000, 5060000, 5280000,
5500000, 5720000, 5940000, 6160000, 6380000, 6600000, 6820000, 7040000
};
-static const int pv88090_buck23_limits[] = {
+static const unsigned int pv88090_buck23_limits[] = {
1496000, 2393000, 3291000, 4189000
};
@@ -150,40 +146,6 @@ static int pv88090_buck_set_mode(struct regulator_dev *rdev,
PV88090_BUCK1_MODE_MASK, val);
}
-static int pv88090_set_current_limit(struct regulator_dev *rdev, int min,
- int max)
-{
- struct pv88090_regulator *info = rdev_get_drvdata(rdev);
- int i;
-
- /* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
- if (min <= info->current_limits[i]
- && max >= info->current_limits[i]) {
- return regmap_update_bits(rdev->regmap,
- info->conf,
- info->limit_mask,
- i << PV88090_BUCK1_ILIM_SHIFT);
- }
- }
-
- return -EINVAL;
-}
-
-static int pv88090_get_current_limit(struct regulator_dev *rdev)
-{
- struct pv88090_regulator *info = rdev_get_drvdata(rdev);
- unsigned int data;
- int ret;
-
- ret = regmap_read(rdev->regmap, info->conf, &data);
- if (ret < 0)
- return ret;
-
- data = (data & info->limit_mask) >> PV88090_BUCK1_ILIM_SHIFT;
- return info->current_limits[data];
-}
-
static const struct regulator_ops pv88090_buck_ops = {
.get_mode = pv88090_buck_get_mode,
.set_mode = pv88090_buck_set_mode,
@@ -193,8 +155,8 @@ static const struct regulator_ops pv88090_buck_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .set_current_limit = pv88090_set_current_limit,
- .get_current_limit = pv88090_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
};
static const struct regulator_ops pv88090_ldo_ops = {
@@ -223,10 +185,11 @@ static const struct regulator_ops pv88090_ldo_ops = {
.enable_mask = PV88090_##regl_name##_EN, \
.vsel_reg = PV88090_REG_##regl_name##_CONF0, \
.vsel_mask = PV88090_V##regl_name##_MASK, \
+ .curr_table = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array), \
+ .csel_reg = PV88090_REG_##regl_name##_CONF1, \
+ .csel_mask = PV88090_##regl_name##_ILIM_MASK, \
},\
- .current_limits = limits_array, \
- .n_current_limits = ARRAY_SIZE(limits_array), \
- .limit_mask = PV88090_##regl_name##_ILIM_MASK, \
.conf = PV88090_REG_##regl_name##_CONF1, \
.conf2 = PV88090_REG_##regl_name##_CONF2, \
}
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index a2fd140eff81..3f53f9134b32 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -40,9 +40,6 @@ struct pwm_regulator_data {
/* regulator descriptor */
struct regulator_desc desc;
- /* Regulator ops */
- struct regulator_ops ops;
-
int state;
/* Enable GPIO */
@@ -231,7 +228,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
return 0;
}
-static struct regulator_ops pwm_regulator_voltage_table_ops = {
+static const struct regulator_ops pwm_regulator_voltage_table_ops = {
.set_voltage_sel = pwm_regulator_set_voltage_sel,
.get_voltage_sel = pwm_regulator_get_voltage_sel,
.list_voltage = pwm_regulator_list_voltage,
@@ -241,7 +238,7 @@ static struct regulator_ops pwm_regulator_voltage_table_ops = {
.is_enabled = pwm_regulator_is_enabled,
};
-static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
+static const struct regulator_ops pwm_regulator_voltage_continuous_ops = {
.get_voltage = pwm_regulator_get_voltage,
.set_voltage = pwm_regulator_set_voltage,
.enable = pwm_regulator_enable,
@@ -249,7 +246,7 @@ static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
.is_enabled = pwm_regulator_is_enabled,
};
-static struct regulator_desc pwm_regulator_desc = {
+static const struct regulator_desc pwm_regulator_desc = {
.name = "pwm-regulator",
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
@@ -287,9 +284,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
drvdata->state = -EINVAL;
drvdata->duty_cycle_table = duty_cycle_table;
- memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
- sizeof(drvdata->ops));
- drvdata->desc.ops = &drvdata->ops;
+ drvdata->desc.ops = &pwm_regulator_voltage_table_ops;
drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table);
return 0;
@@ -301,9 +296,7 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
u32 dutycycle_range[2] = { 0, 100 };
u32 dutycycle_unit = 100;
- memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops,
- sizeof(drvdata->ops));
- drvdata->desc.ops = &drvdata->ops;
+ drvdata->desc.ops = &pwm_regulator_voltage_continuous_ops;
drvdata->desc.continuous_voltage_range = true;
of_property_read_u32_array(pdev->dev.of_node,
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index f5bca77d67c1..68bc23df4213 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -31,6 +31,11 @@ struct qcom_rpm_reg {
int is_enabled;
int uV;
+ u32 load;
+
+ unsigned int enabled_updated:1;
+ unsigned int uv_updated:1;
+ unsigned int load_updated:1;
};
struct rpm_regulator_req {
@@ -43,30 +48,59 @@ struct rpm_regulator_req {
#define RPM_KEY_UV 0x00007675 /* "uv" */
#define RPM_KEY_MA 0x0000616d /* "ma" */
-static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
- struct rpm_regulator_req *req,
- size_t size)
+static int rpm_reg_write_active(struct qcom_rpm_reg *vreg)
{
- return qcom_rpm_smd_write(vreg->rpm,
- QCOM_SMD_RPM_ACTIVE_STATE,
- vreg->type,
- vreg->id,
- req, size);
+ struct rpm_regulator_req req[3];
+ int reqlen = 0;
+ int ret;
+
+ if (vreg->enabled_updated) {
+ req[reqlen].key = cpu_to_le32(RPM_KEY_SWEN);
+ req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
+ req[reqlen].value = cpu_to_le32(vreg->is_enabled);
+ reqlen++;
+ }
+
+ if (vreg->uv_updated && vreg->is_enabled) {
+ req[reqlen].key = cpu_to_le32(RPM_KEY_UV);
+ req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
+ req[reqlen].value = cpu_to_le32(vreg->uV);
+ reqlen++;
+ }
+
+ if (vreg->load_updated && vreg->is_enabled) {
+ req[reqlen].key = cpu_to_le32(RPM_KEY_MA);
+ req[reqlen].nbytes = cpu_to_le32(sizeof(u32));
+ req[reqlen].value = cpu_to_le32(vreg->load / 1000);
+ reqlen++;
+ }
+
+ if (!reqlen)
+ return 0;
+
+ ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ vreg->type, vreg->id,
+ req, sizeof(req[0]) * reqlen);
+ if (!ret) {
+ vreg->enabled_updated = 0;
+ vreg->uv_updated = 0;
+ vreg->load_updated = 0;
+ }
+
+ return ret;
}
static int rpm_reg_enable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
int ret;
- req.key = cpu_to_le32(RPM_KEY_SWEN);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = cpu_to_le32(1);
+ vreg->is_enabled = 1;
+ vreg->enabled_updated = 1;
- ret = rpm_reg_write_active(vreg, &req, sizeof(req));
- if (!ret)
- vreg->is_enabled = 1;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->is_enabled = 0;
return ret;
}
@@ -81,16 +115,14 @@ static int rpm_reg_is_enabled(struct regulator_dev *rdev)
static int rpm_reg_disable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
int ret;
- req.key = cpu_to_le32(RPM_KEY_SWEN);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = 0;
+ vreg->is_enabled = 0;
+ vreg->enabled_updated = 1;
- ret = rpm_reg_write_active(vreg, &req, sizeof(req));
- if (!ret)
- vreg->is_enabled = 0;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->is_enabled = 1;
return ret;
}
@@ -108,16 +140,15 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev,
unsigned *selector)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
- int ret = 0;
+ int ret;
+ int old_uV = vreg->uV;
- req.key = cpu_to_le32(RPM_KEY_UV);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = cpu_to_le32(min_uV);
+ vreg->uV = min_uV;
+ vreg->uv_updated = 1;
- ret = rpm_reg_write_active(vreg, &req, sizeof(req));
- if (!ret)
- vreg->uV = min_uV;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->uV = old_uV;
return ret;
}
@@ -125,13 +156,16 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev,
static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
- struct rpm_regulator_req req;
+ u32 old_load = vreg->load;
+ int ret;
- req.key = cpu_to_le32(RPM_KEY_MA);
- req.nbytes = cpu_to_le32(sizeof(u32));
- req.value = cpu_to_le32(load_uA / 1000);
+ vreg->load = load_uA;
+ vreg->load_updated = 1;
+ ret = rpm_reg_write_active(vreg);
+ if (ret)
+ vreg->load = old_load;
- return rpm_reg_write_active(vreg, &req, sizeof(req));
+ return ret;
}
static const struct regulator_ops rpm_smps_ldo_ops = {
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 213b68743cc8..23713e16c286 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1,5 +1,5 @@
/*
- * Regulator driver for Rockchip RK808/RK818
+ * Regulator driver for Rockchip RK805/RK808/RK818
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
@@ -363,28 +363,28 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev)
rdev->desc->enable_mask);
}
-static struct regulator_ops rk805_reg_ops = {
- .list_voltage = regulator_list_voltage_linear,
- .map_voltage = regulator_map_voltage_linear,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .set_suspend_voltage = rk808_set_suspend_voltage,
- .set_suspend_enable = rk805_set_suspend_enable,
- .set_suspend_disable = rk805_set_suspend_disable,
+static const struct regulator_ops rk805_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_voltage = rk808_set_suspend_voltage,
+ .set_suspend_enable = rk805_set_suspend_enable,
+ .set_suspend_disable = rk805_set_suspend_disable,
};
-static struct regulator_ops rk805_switch_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .set_suspend_enable = rk805_set_suspend_enable,
- .set_suspend_disable = rk805_set_suspend_disable,
+static const struct regulator_ops rk805_switch_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_enable = rk805_set_suspend_enable,
+ .set_suspend_disable = rk805_set_suspend_disable,
};
-static struct regulator_ops rk808_buck1_2_ops = {
+static const struct regulator_ops rk808_buck1_2_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap,
@@ -399,7 +399,7 @@ static struct regulator_ops rk808_buck1_2_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static struct regulator_ops rk808_reg_ops = {
+static const struct regulator_ops rk808_reg_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -412,7 +412,7 @@ static struct regulator_ops rk808_reg_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static struct regulator_ops rk808_reg_ops_ranges = {
+static const struct regulator_ops rk808_reg_ops_ranges = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -425,7 +425,7 @@ static struct regulator_ops rk808_reg_ops_ranges = {
.set_suspend_disable = rk808_set_suspend_disable,
};
-static struct regulator_ops rk808_switch_ops = {
+static const struct regulator_ops rk808_switch_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -433,6 +433,12 @@ static struct regulator_ops rk808_switch_ops = {
.set_suspend_disable = rk808_set_suspend_disable,
};
+static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(712500, 0, 59, 12500),
+ REGULATOR_LINEAR_RANGE(1800000, 60, 62, 200000),
+ REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0),
+};
+
static const struct regulator_desc rk805_reg[] = {
{
.name = "DCDC_REG1",
@@ -440,11 +446,11 @@ static const struct regulator_desc rk805_reg[] = {
.of_match = of_match_ptr("DCDC_REG1"),
.regulators_node = of_match_ptr("regulators"),
.id = RK805_ID_DCDC1,
- .ops = &rk805_reg_ops,
+ .ops = &rk808_reg_ops_ranges,
.type = REGULATOR_VOLTAGE,
- .min_uV = 712500,
- .uV_step = 12500,
.n_voltages = 64,
+ .linear_ranges = rk805_buck_1_2_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges),
.vsel_reg = RK805_BUCK1_ON_VSEL_REG,
.vsel_mask = RK818_BUCK_VSEL_MASK,
.enable_reg = RK805_DCDC_EN_REG,
@@ -456,11 +462,11 @@ static const struct regulator_desc rk805_reg[] = {
.of_match = of_match_ptr("DCDC_REG2"),
.regulators_node = of_match_ptr("regulators"),
.id = RK805_ID_DCDC2,
- .ops = &rk805_reg_ops,
+ .ops = &rk808_reg_ops_ranges,
.type = REGULATOR_VOLTAGE,
- .min_uV = 712500,
- .uV_step = 12500,
.n_voltages = 64,
+ .linear_ranges = rk805_buck_1_2_voltage_ranges,
+ .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges),
.vsel_reg = RK805_BUCK2_ON_VSEL_REG,
.vsel_mask = RK818_BUCK_VSEL_MASK,
.enable_reg = RK805_DCDC_EN_REG,
@@ -796,7 +802,7 @@ static struct platform_driver rk808_regulator_driver = {
module_platform_driver(rk808_regulator_driver);
-MODULE_DESCRIPTION("regulator driver for the RK808/RK818 series PMICs");
+MODULE_DESCRIPTION("regulator driver for the RK805/RK808/RK818 series PMICs");
MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>");
MODULE_AUTHOR("Wadim Egorov <w.egorov@phytec.de>");
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
index 96d2c18e051a..639cbadc044a 100644
--- a/drivers/regulator/rt5033-regulator.c
+++ b/drivers/regulator/rt5033-regulator.c
@@ -16,14 +16,14 @@
#include <linux/mfd/rt5033-private.h>
#include <linux/regulator/of_regulator.h>
-static struct regulator_ops rt5033_safe_ldo_ops = {
+static const struct regulator_ops rt5033_safe_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops rt5033_buck_ops = {
+static const struct regulator_ops rt5033_buck_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 095d25f3d2ea..58a1fe583a6c 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(2, STEP_50_MV),
regulator_desc_ldo(3, STEP_50_MV),
regulator_desc_ldo(4, STEP_50_MV),
- regulator_desc_ldo(5, STEP_50_MV),
+ regulator_desc_ldo(5, STEP_25_MV),
regulator_desc_ldo(6, STEP_25_MV),
regulator_desc_ldo(7, STEP_50_MV),
regulator_desc_ldo(8, STEP_50_MV),
regulator_desc_ldo(9, STEP_50_MV),
regulator_desc_ldo(10, STEP_50_MV),
- regulator_desc_ldo(11, STEP_25_MV),
+ regulator_desc_ldo(11, STEP_50_MV),
regulator_desc_ldo(12, STEP_50_MV),
regulator_desc_ldo(13, STEP_50_MV),
regulator_desc_ldo(14, STEP_50_MV),
@@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(19, STEP_50_MV),
regulator_desc_ldo(20, STEP_50_MV),
regulator_desc_ldo(21, STEP_50_MV),
- regulator_desc_ldo(22, STEP_25_MV),
- regulator_desc_ldo(23, STEP_25_MV),
+ regulator_desc_ldo(22, STEP_50_MV),
+ regulator_desc_ldo(23, STEP_50_MV),
regulator_desc_ldo(24, STEP_50_MV),
regulator_desc_ldo(25, STEP_50_MV),
- regulator_desc_ldo(26, STEP_50_MV),
+ regulator_desc_ldo(26, STEP_25_MV),
regulator_desc_buck1_4(1),
regulator_desc_buck1_4(2),
regulator_desc_buck1_4(3),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index ee4a23ab0663..134c62db36c5 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -362,7 +362,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_ldo(32, STEP_50_MV),
regulator_desc_s2mps11_ldo(33, STEP_50_MV),
regulator_desc_s2mps11_ldo(34, STEP_50_MV),
- regulator_desc_s2mps11_ldo(35, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(35, STEP_25_MV),
regulator_desc_s2mps11_ldo(36, STEP_50_MV),
regulator_desc_s2mps11_ldo(37, STEP_50_MV),
regulator_desc_s2mps11_ldo(38, STEP_50_MV),
@@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck9,
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index b581f01f3395..bb9d1a083299 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -115,7 +115,7 @@ static const struct sec_voltage_desc *reg_voltage_map[] = {
[S5M8767_BUCK9] = &buck_voltage_val3,
};
-static unsigned int s5m8767_opmode_reg[][4] = {
+static const unsigned int s5m8767_opmode_reg[][4] = {
/* {OFF, ON, LOWPOWER, SUSPEND} */
/* LDO1 ... LDO28 */
{0x0, 0x3, 0x2, 0x1}, /* LDO1 */
@@ -339,13 +339,9 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_sel)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- const struct sec_voltage_desc *desc;
- int reg_id = rdev_get_id(rdev);
-
- desc = reg_voltage_map[reg_id];
if ((old_sel < new_sel) && s5m8767->ramp_delay)
- return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
+ return DIV_ROUND_UP(rdev->desc->uV_step * (new_sel - old_sel),
s5m8767->ramp_delay * 1000);
return 0;
}
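
Note: the s5m8767 hunk drops the private voltage-map lookup in set_voltage_time_sel() and reads the step size straight from rdev->desc->uV_step; ramp_delay here appears to be in mV/us, hence the * 1000. A worked sketch under that assumption (helper name is illustrative):

/* Sketch: uV_step = 25000 (25 mV), ramping up 4 selectors at 25 mV/us
 * gives DIV_ROUND_UP(25000 * 4, 25 * 1000) = 4 us. */
static int ramp_time_us(const struct regulator_desc *desc,
			unsigned int old_sel, unsigned int new_sel,
			int ramp_delay_mV_per_us)
{
	if (new_sel <= old_sel || !ramp_delay_mV_per_us)
		return 0;

	return DIV_ROUND_UP(desc->uV_step * (new_sel - old_sel),
			    ramp_delay_mV_per_us * 1000);
}
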
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index e0a9c445ed67..ba2f24949dc9 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
+#include <linux/pm_runtime.h>
/* STM32 VREFBUF registers */
#define STM32_VREFBUF_CSR 0x00
@@ -25,9 +26,12 @@
#define STM32_HIZ BIT(1)
#define STM32_ENVR BIT(0)
+#define STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS 10
+
struct stm32_vrefbuf {
void __iomem *base;
struct clk *clk;
+ struct device *dev;
};
static const unsigned int stm32_vrefbuf_voltages[] = {
@@ -38,9 +42,16 @@ static const unsigned int stm32_vrefbuf_voltages[] = {
static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
int ret;
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
val = (val & ~STM32_HIZ) | STM32_ENVR;
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
@@ -59,45 +70,95 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
}
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
return ret;
}
static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
+ int ret;
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
val = (val & ~STM32_ENVR) | STM32_HIZ;
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
return 0;
}
static int stm32_vrefbuf_is_enabled(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR;
- return readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR;
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
+ return ret;
}
static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
+ int ret;
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel);
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
return 0;
}
static int stm32_vrefbuf_get_voltage_sel(struct regulator_dev *rdev)
{
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
- u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ u32 val;
+ int ret;
- return FIELD_GET(STM32_VRS, val);
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
+ ret = FIELD_GET(STM32_VRS, val);
+
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_autosuspend(priv->dev);
+
+ return ret;
}
static const struct regulator_ops stm32_vrefbuf_volt_ops = {
@@ -130,6 +191,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
@@ -140,10 +202,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev, "clk prepare failed with error %d\n", ret);
- return ret;
+ goto err_pm_stop;
}
config.dev = &pdev->dev;
@@ -161,10 +230,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err_clk_dis:
clk_disable_unprepare(priv->clk);
+err_pm_stop:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
return ret;
}
@@ -174,12 +250,42 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+ pm_runtime_get_sync(&pdev->dev);
regulator_unregister(rdev);
clk_disable_unprepare(priv->clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
};
+static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_vrefbuf_runtime_resume(struct device *dev)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
+
+ return clk_prepare_enable(priv->clk);
+}
+
+static const struct dev_pm_ops stm32_vrefbuf_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_vrefbuf_runtime_suspend,
+ stm32_vrefbuf_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id stm32_vrefbuf_of_match[] = {
{ .compatible = "st,stm32-vrefbuf", },
{},
@@ -192,6 +298,7 @@ static struct platform_driver stm32_vrefbuf_driver = {
.driver = {
.name = "stm32-vrefbuf",
.of_match_table = of_match_ptr(stm32_vrefbuf_of_match),
+ .pm = &stm32_vrefbuf_pm_ops,
},
};
module_platform_driver(stm32_vrefbuf_driver);
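
Note: the stm32-vrefbuf hunks bracket every CSR access with a runtime-PM get/put pair, so the peripheral clock (handled in the new runtime suspend/resume callbacks) only stays on while a register is actually being touched and gates off again after the 10 ms autosuspend delay. The recurring bracket, as a generic sketch (the helper name is hypothetical):

static int vrefbuf_read_csr(struct stm32_vrefbuf *priv, u32 *val)
{
	int ret;

	ret = pm_runtime_get_sync(priv->dev);	/* resume -> clk_prepare_enable() */
	if (ret < 0) {
		pm_runtime_put_noidle(priv->dev);	/* rebalance the usage count */
		return ret;
	}

	*val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);

	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_autosuspend(priv->dev);	/* clock gates after the delay */

	return 0;
}
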
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index 16ba0297f709..f09061473613 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -12,8 +12,10 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
/**
- * stpmic1 regulator description
+ * stpmic1 regulator description: this structure is used as driver data
* @desc: regulator framework description
* @mask_reset_reg: mask reset register address
* @mask_reset_mask: mask rank and mask reset register mask
@@ -28,28 +30,9 @@ struct stpmic1_regulator_cfg {
u8 icc_mask;
};
-/**
- * stpmic1 regulator data: this structure is used as driver data
- * @regul_id: regulator id
- * @reg_node: DT node of regulator (unused on non-DT platforms)
- * @cfg: stpmic specific regulator description
- * @mask_reset: mask_reset bit value
- * @irq_curlim: current limit interrupt number
- * @regmap: point to parent regmap structure
- */
-struct stpmic1_regulator {
- unsigned int regul_id;
- struct device_node *reg_node;
- struct stpmic1_regulator_cfg *cfg;
- u8 mask_reset;
- int irq_curlim;
- struct regmap *regmap;
-};
-
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode);
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev);
static int stpmic1_set_icc(struct regulator_dev *rdev);
-static int stpmic1_regulator_parse_dt(void *driver_data);
static unsigned int stpmic1_map_mode(unsigned int mode);
enum {
@@ -72,15 +55,13 @@ enum {
/* Enable time worst case is 5000mV/(2250uV/uS) */
#define PMIC_ENABLE_TIME_US 2200
-#define STPMIC1_BUCK_MODE_NORMAL 0
-#define STPMIC1_BUCK_MODE_LP BUCK_HPLP_ENABLE_MASK
-
-struct regulator_linear_range buck1_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0, 30, 25000),
- REGULATOR_LINEAR_RANGE(1350000, 31, 63, 0),
+static const struct regulator_linear_range buck1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
+ REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0),
};
-struct regulator_linear_range buck2_ranges[] = {
+static const struct regulator_linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0),
REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0),
@@ -94,7 +75,7 @@ struct regulator_linear_range buck2_ranges[] = {
REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0),
};
-struct regulator_linear_range buck3_ranges[] = {
+static const struct regulator_linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0),
REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0),
REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0),
@@ -102,10 +83,9 @@ struct regulator_linear_range buck3_ranges[] = {
REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000),
REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0),
-
};
-struct regulator_linear_range buck4_ranges[] = {
+static const struct regulator_linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000),
REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
@@ -113,24 +93,21 @@ struct regulator_linear_range buck4_ranges[] = {
REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000),
REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0),
-
};
-struct regulator_linear_range ldo1_ranges[] = {
+static const struct regulator_linear_range ldo1_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
-
};
-struct regulator_linear_range ldo2_ranges[] = {
+static const struct regulator_linear_range ldo2_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
-
};
-struct regulator_linear_range ldo3_ranges[] = {
+static const struct regulator_linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
@@ -138,18 +115,18 @@ struct regulator_linear_range ldo3_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 31, 31, 0),
};
-struct regulator_linear_range ldo5_ranges[] = {
+static const struct regulator_linear_range ldo5_ranges[] = {
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000),
REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0),
};
-struct regulator_linear_range ldo6_ranges[] = {
+static const struct regulator_linear_range ldo6_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000),
REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
};
-static struct regulator_ops stpmic1_ldo_ops = {
+static const struct regulator_ops stpmic1_ldo_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
@@ -157,11 +134,10 @@ static struct regulator_ops stpmic1_ldo_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_ldo3_ops = {
+static const struct regulator_ops stpmic1_ldo3_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_iterate,
.is_enabled = regulator_is_enabled_regmap,
@@ -169,21 +145,19 @@ static struct regulator_ops stpmic1_ldo3_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
+static const struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_buck_ops = {
+static const struct regulator_ops stpmic1_buck_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.is_enabled = regulator_is_enabled_regmap,
@@ -197,20 +171,27 @@ static struct regulator_ops stpmic1_buck_ops = {
.set_over_current_protection = stpmic1_set_icc,
};
-static struct regulator_ops stpmic1_vref_ddr_ops = {
+static const struct regulator_ops stpmic1_vref_ddr_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .set_pull_down = regulator_set_pull_down_regmap,
};
-static struct regulator_ops stpmic1_switch_regul_ops = {
+static const struct regulator_ops stpmic1_boost_regul_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.set_over_current_protection = stpmic1_set_icc,
};
+static const struct regulator_ops stpmic1_switch_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_over_current_protection = stpmic1_set_icc,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
#define REG_LDO(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
@@ -227,8 +208,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
@@ -252,8 +231,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.bypass_mask = LDO_BYPASS_MASK, \
.bypass_val_on = LDO_BYPASS_MASK, \
.bypass_val_off = 0, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
@@ -271,8 +248,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
@@ -312,12 +287,47 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.enable_val = 1, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
- .pull_down_reg = ids##_PULL_DOWN_REG, \
- .pull_down_mask = ids##_PULL_DOWN_MASK, \
.supply_name = #base, \
}
-#define REG_SWITCH(ids, base, reg, mask, val) { \
+#define REG_BOOST(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_boost_regul_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 0, \
+ .fixed_uV = 5000000, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = BOOST_ENABLED, \
+ .enable_val = BOOST_ENABLED, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .supply_name = #base, \
+}
+
+#define REG_VBUS_OTG(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_switch_regul_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 0, \
+ .fixed_uV = 5000000, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = USBSW_OTG_SWITCH_ENABLED, \
+ .enable_val = USBSW_OTG_SWITCH_ENABLED, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .supply_name = #base, \
+ .active_discharge_reg = BST_SW_CR, \
+ .active_discharge_mask = VBUS_OTG_DISCHARGE, \
+ .active_discharge_on = VBUS_OTG_DISCHARGE, \
+}
+
+#define REG_SW_OUT(ids, base) { \
.name = #ids, \
.id = STPMIC1_##ids, \
.n_voltages = 1, \
@@ -326,15 +336,18 @@ static struct regulator_ops stpmic1_switch_regul_ops = {
.owner = THIS_MODULE, \
.min_uV = 0, \
.fixed_uV = 5000000, \
- .enable_reg = (reg), \
- .enable_mask = (mask), \
- .enable_val = (val), \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = SWIN_SWOUT_ENABLED, \
+ .enable_val = SWIN_SWOUT_ENABLED, \
.disable_val = 0, \
.enable_time = PMIC_ENABLE_TIME_US, \
.supply_name = #base, \
+ .active_discharge_reg = BST_SW_CR, \
+ .active_discharge_mask = SW_OUT_DISCHARGE, \
+ .active_discharge_on = SW_OUT_DISCHARGE, \
}
-struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
+static const struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
[STPMIC1_BUCK1] = {
.desc = REG_BUCK(BUCK1, buck1),
.icc_reg = BUCKS_ICCTO_CR,
@@ -411,23 +424,17 @@ struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
.mask_reset_mask = BIT(6),
},
[STPMIC1_BOOST] = {
- .desc = REG_SWITCH(BOOST, boost, BST_SW_CR,
- BOOST_ENABLED,
- BOOST_ENABLED),
+ .desc = REG_BOOST(BOOST, boost),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(6),
},
[STPMIC1_VBUS_OTG] = {
- .desc = REG_SWITCH(VBUS_OTG, pwr_sw1, BST_SW_CR,
- USBSW_OTG_SWITCH_ENABLED,
- USBSW_OTG_SWITCH_ENABLED),
+ .desc = REG_VBUS_OTG(VBUS_OTG, pwr_sw1),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(4),
},
[STPMIC1_SW_OUT] = {
- .desc = REG_SWITCH(SW_OUT, pwr_sw2, BST_SW_CR,
- SWIN_SWOUT_ENABLED,
- SWIN_SWOUT_ENABLED),
+ .desc = REG_SW_OUT(SW_OUT, pwr_sw2),
.icc_reg = BUCKS_ICCTO_CR,
.icc_mask = BIT(5),
},
@@ -448,8 +455,9 @@ static unsigned int stpmic1_map_mode(unsigned int mode)
static unsigned int stpmic1_get_mode(struct regulator_dev *rdev)
{
int value;
+ struct regmap *regmap = rdev_get_regmap(rdev);
- regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+ regmap_read(regmap, rdev->desc->enable_reg, &value);
if (value & STPMIC1_BUCK_MODE_LP)
return REGULATOR_MODE_STANDBY;
@@ -460,6 +468,7 @@ static unsigned int stpmic1_get_mode(struct regulator_dev *rdev)
static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
int value;
+ struct regmap *regmap = rdev_get_regmap(rdev);
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -472,17 +481,18 @@ static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
return -EINVAL;
}
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ return regmap_update_bits(regmap, rdev->desc->enable_reg,
STPMIC1_BUCK_MODE_LP, value);
}
static int stpmic1_set_icc(struct regulator_dev *rdev)
{
- struct stpmic1_regulator *regul = rdev_get_drvdata(rdev);
+ struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev_get_regmap(rdev);
/* enable switch off in case of over current */
- return regmap_update_bits(regul->regmap, regul->cfg->icc_reg,
- regul->cfg->icc_mask, regul->cfg->icc_mask);
+ return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask,
+ cfg->icc_mask);
}
static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
@@ -501,46 +511,13 @@ static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int stpmic1_regulator_init(struct platform_device *pdev,
- struct regulator_dev *rdev)
-{
- struct stpmic1_regulator *regul = rdev_get_drvdata(rdev);
- int ret = 0;
-
- /* set mask reset */
- if (regul->mask_reset && regul->cfg->mask_reset_reg != 0) {
- ret = regmap_update_bits(regul->regmap,
- regul->cfg->mask_reset_reg,
- regul->cfg->mask_reset_mask,
- regul->cfg->mask_reset_mask);
- if (ret) {
- dev_err(&pdev->dev, "set mask reset failed\n");
- return ret;
- }
- }
-
- /* setup an irq handler for over-current detection */
- if (regul->irq_curlim > 0) {
- ret = devm_request_threaded_irq(&pdev->dev,
- regul->irq_curlim, NULL,
- stpmic1_curlim_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED,
- pdev->name, rdev);
- if (ret) {
- dev_err(&pdev->dev, "Request IRQ failed\n");
- return ret;
- }
- }
- return 0;
-}
-
#define MATCH(_name, _id) \
[STPMIC1_##_id] = { \
.name = #_name, \
.desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \
}
-static struct of_regulator_match stpmic1_regulators_matches[] = {
+static struct of_regulator_match stpmic1_matches[] = {
MATCH(buck1, BUCK1),
MATCH(buck2, BUCK2),
MATCH(buck3, BUCK3),
@@ -557,94 +534,75 @@ static struct of_regulator_match stpmic1_regulators_matches[] = {
MATCH(pwr_sw2, SW_OUT),
};
-static int stpmic1_regulator_parse_dt(void *driver_data)
-{
- struct stpmic1_regulator *regul =
- (struct stpmic1_regulator *)driver_data;
-
- if (!regul)
- return -EINVAL;
-
- if (of_get_property(regul->reg_node, "st,mask-reset", NULL))
- regul->mask_reset = 1;
-
- regul->irq_curlim = of_irq_get(regul->reg_node, 0);
-
- return 0;
-}
-
-static struct
-regulator_dev *stpmic1_regulator_register(struct platform_device *pdev, int id,
- struct regulator_init_data *init_data,
- struct stpmic1_regulator *regul)
+static int stpmic1_regulator_register(struct platform_device *pdev, int id,
+ struct of_regulator_match *match,
+ const struct stpmic1_regulator_cfg *cfg)
{
struct stpmic1 *pmic_dev = dev_get_drvdata(pdev->dev.parent);
struct regulator_dev *rdev;
struct regulator_config config = {};
+ int ret = 0;
+ int irq;
config.dev = &pdev->dev;
- config.init_data = init_data;
- config.of_node = stpmic1_regulators_matches[id].of_node;
+ config.init_data = match->init_data;
+ config.of_node = match->of_node;
config.regmap = pmic_dev->regmap;
- config.driver_data = regul;
-
- regul->regul_id = id;
- regul->reg_node = config.of_node;
- regul->cfg = &stpmic1_regulator_cfgs[id];
- regul->regmap = pmic_dev->regmap;
+ config.driver_data = (void *)cfg;
- rdev = devm_regulator_register(&pdev->dev, &regul->cfg->desc, &config);
+ rdev = devm_regulator_register(&pdev->dev, &cfg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s regulator\n",
- regul->cfg->desc.name);
+ cfg->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ /* set mask reset */
+ if (of_get_property(config.of_node, "st,mask-reset", NULL) &&
+ cfg->mask_reset_reg != 0) {
+ ret = regmap_update_bits(pmic_dev->regmap,
+ cfg->mask_reset_reg,
+ cfg->mask_reset_mask,
+ cfg->mask_reset_mask);
+ if (ret) {
+ dev_err(&pdev->dev, "set mask reset failed\n");
+ return ret;
+ }
}
- return rdev;
+ /* setup an irq handler for over-current detection */
+ irq = of_irq_get(config.of_node, 0);
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev,
+ irq, NULL,
+ stpmic1_curlim_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ pdev->name, rdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ failed\n");
+ return ret;
+ }
+ }
+ return 0;
}
static int stpmic1_regulator_probe(struct platform_device *pdev)
{
- struct regulator_dev *rdev;
- struct stpmic1_regulator *regul;
- struct regulator_init_data *init_data;
- struct device_node *np;
int i, ret;
- np = pdev->dev.of_node;
-
- ret = of_regulator_match(&pdev->dev, np,
- stpmic1_regulators_matches,
- ARRAY_SIZE(stpmic1_regulators_matches));
+ ret = of_regulator_match(&pdev->dev, pdev->dev.of_node, stpmic1_matches,
+ ARRAY_SIZE(stpmic1_matches));
if (ret < 0) {
dev_err(&pdev->dev,
"Error in PMIC regulator device tree node");
return ret;
}
- regul = devm_kzalloc(&pdev->dev, ARRAY_SIZE(stpmic1_regulator_cfgs) *
- sizeof(struct stpmic1_regulator),
- GFP_KERNEL);
- if (!regul)
- return -ENOMEM;
-
for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) {
- /* Parse DT & find regulators to register */
- init_data = stpmic1_regulators_matches[i].init_data;
- if (init_data)
- init_data->regulator_init = &stpmic1_regulator_parse_dt;
-
- rdev = stpmic1_regulator_register(pdev, i, init_data, regul);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
-
- ret = stpmic1_regulator_init(pdev, rdev);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to initialize regulator %d\n", ret);
+ ret = stpmic1_regulator_register(pdev, i, &stpmic1_matches[i],
+ &stpmic1_regulator_cfgs[i]);
+ if (ret < 0)
return ret;
- }
-
- regul++;
}
dev_dbg(&pdev->dev, "stpmic1_regulator driver probed\n");
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 6209beee1018..95708d34876b 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -188,7 +188,8 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
-static const int ls3_currents[] = { 100, 200, 500, 1000 };
+static const int ls3_currents[] = { 100000, 200000, 500000, 1000000 };
+
static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev,
int lim_uA)
@@ -204,7 +205,8 @@ static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev,
return -EINVAL;
return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask,
- index << 2, TPS65218_PROTECT_L1);
+ index << __builtin_ctz(dev->desc->csel_mask),
+ TPS65218_PROTECT_L1);
}
static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
@@ -214,7 +216,7 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
unsigned int num_currents = ARRAY_SIZE(ls3_currents);
struct tps65218 *tps = rdev_get_drvdata(dev);
- while (index < num_currents && ls3_currents[index] < max_uA)
+ while (index < num_currents && ls3_currents[index] <= max_uA)
index++;
index--;
@@ -223,7 +225,8 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev,
return -EINVAL;
return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask,
- index << 2, TPS65218_PROTECT_L1);
+ index << __builtin_ctz(dev->desc->csel_mask),
+ TPS65218_PROTECT_L1);
}
static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
@@ -236,12 +239,13 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
if (retval < 0)
return retval;
- index = (index & dev->desc->csel_mask) >> 2;
+ index = (index & dev->desc->csel_mask) >>
+ __builtin_ctz(dev->desc->csel_mask);
return ls3_currents[index];
}
-static struct regulator_ops tps65218_ls3_ops = {
+static struct regulator_ops tps65218_ls23_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
@@ -303,8 +307,13 @@ static const struct regulator_desc regulators[] = {
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
2, 0, 0, TPS65218_REG_SEQ6,
TPS65218_SEQ6_LDO1_SEQ_MASK),
+ TPS65218_REGULATOR("LS2", "regulator-ls2", TPS65218_LS_2,
+ REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
+ TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS2_EN,
+ TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS2ILIM_MASK,
+ NULL, 0, 0, 0, 0, 0),
TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
- REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0,
+ REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0,
TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
NULL, 0, 0, 0, 0, 0),
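
Note: the tps65218 changes above store the LS current limits in microamps and derive the register shift from csel_mask with __builtin_ctz() rather than a hard-coded << 2, which is what lets LS2 reuse the same tps65218_ls23_ops as LS3. For instance, a mask of 0x0c has __builtin_ctz(0x0c) = 2, so index 3 is written as 0x0c, exactly what the old shift produced (0x0c is illustrative, not taken from the header). A sketch of the mask-relative packing:

static unsigned int pack_csel(unsigned int index, unsigned int mask)
{
	/* place the index at the mask's lowest set bit */
	return (index << __builtin_ctz(mask)) & mask;
}

static unsigned int unpack_csel(unsigned int reg, unsigned int mask)
{
	return (reg & mask) >> __builtin_ctz(mask);
}
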
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 884c7505ed91..402ea43c77d1 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -576,14 +576,9 @@ static int twlreg_probe(struct platform_device *pdev)
struct regulator_init_data *initdata;
struct regulation_constraints *c;
struct regulator_dev *rdev;
- const struct of_device_id *match;
struct regulator_config config = { };
- match = of_match_device(twl_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- template = match->data;
+ template = of_device_get_match_data(&pdev->dev);
if (!template)
return -ENODEV;
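
Note: the twl-regulator probe above (and the identical change in the twl6030 hunk further down) swaps of_match_device() plus a manual ->data dereference for of_device_get_match_data(), which returns the matched compatible's data directly or NULL. Sketched, with the probe body elided:

static int my_probe(struct platform_device *pdev)
{
	const struct twlreg_info *template;

	/* one call instead of of_match_device() + match->data */
	template = of_device_get_match_data(&pdev->dev);
	if (!template)
		return -ENODEV;

	/* ... register the regulator described by *template ... */
	return 0;
}
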
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 219cbd910dbf..15f19df6bc5d 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -31,9 +31,6 @@ struct twlreg_info {
/* twl resource ID, for resource control state machine */
u8 id;
- /* chip constraints on regulator behavior */
- u16 min_mV;
-
u8 flags;
/* used by regulator core */
@@ -247,32 +244,11 @@ static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
return -ENODEV;
}
-static struct regulator_ops twl6030coresmps_ops = {
+static const struct regulator_ops twl6030coresmps_ops = {
.set_voltage = twl6030coresmps_set_voltage,
.get_voltage = twl6030coresmps_get_voltage,
};
-static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- switch (sel) {
- case 0:
- return 0;
- case 1 ... 24:
- /* Linear mapping from 00000001 to 00011000:
- * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001)
- */
- return (info->min_mV + 100 * (sel - 1)) * 1000;
- case 25 ... 30:
- return -EINVAL;
- case 31:
- return 2750000;
- default:
- return -EINVAL;
- }
-}
-
static int
twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
@@ -290,8 +266,8 @@ static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
return vsel;
}
-static struct regulator_ops twl6030ldo_ops = {
- .list_voltage = twl6030ldo_list_voltage,
+static const struct regulator_ops twl6030ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = twl6030ldo_set_voltage_sel,
.get_voltage_sel = twl6030ldo_get_voltage_sel,
@@ -305,7 +281,7 @@ static struct regulator_ops twl6030ldo_ops = {
.get_status = twl6030reg_get_status,
};
-static struct regulator_ops twl6030fixed_ops = {
+static const struct regulator_ops twl6030fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = twl6030reg_enable,
@@ -496,7 +472,7 @@ static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
}
-static struct regulator_ops twlsmps_ops = {
+static const struct regulator_ops twlsmps_ops = {
.list_voltage = twl6030smps_list_voltage,
.map_voltage = twl6030smps_map_voltage,
@@ -513,6 +489,11 @@ static struct regulator_ops twlsmps_ops = {
};
/*----------------------------------------------------------------------*/
+static const struct regulator_linear_range twl6030ldo_linear_range[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 0, 0),
+ REGULATOR_LINEAR_RANGE(1000000, 1, 24, 100000),
+ REGULATOR_LINEAR_RANGE(2750000, 31, 31, 0),
+};
#define TWL6030_ADJUSTABLE_SMPS(label) \
static const struct twlreg_info TWL6030_INFO_##label = { \
@@ -525,28 +506,30 @@ static const struct twlreg_info TWL6030_INFO_##label = { \
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts) \
+#define TWL6030_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6030_INFO_##label = { \
.base = offset, \
- .min_mV = min_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
.n_voltages = 32, \
+ .linear_ranges = twl6030ldo_linear_range, \
+ .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts) \
+#define TWL6032_ADJUSTABLE_LDO(label, offset) \
static const struct twlreg_info TWL6032_INFO_##label = { \
.base = offset, \
- .min_mV = min_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
.n_voltages = 32, \
+ .linear_ranges = twl6030ldo_linear_range, \
+ .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -557,7 +540,6 @@ static const struct twlreg_info TWL6032_INFO_##label = { \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = 0, \
- .min_mV = mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030##_REG_##label, \
@@ -574,7 +556,6 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
.base = offset, \
- .min_mV = 600, \
.desc = { \
.name = #label, \
.id = TWL6032_REG_##label, \
@@ -592,22 +573,22 @@ static const struct twlreg_info TWLSMPS_INFO_##label = { \
TWL6030_ADJUSTABLE_SMPS(VDD1);
TWL6030_ADJUSTABLE_SMPS(VDD2);
TWL6030_ADJUSTABLE_SMPS(VDD3);
-TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000);
-TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000);
-TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000);
-TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000);
-TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000);
-TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000);
+TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54);
+TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58);
+TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c);
+TWL6030_ADJUSTABLE_LDO(VMMC, 0x68);
+TWL6030_ADJUSTABLE_LDO(VPP, 0x6c);
+TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74);
/* 6025 are renamed compared to 6030 versions */
-TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000);
-TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000);
-TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000);
-TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000);
+TWL6032_ADJUSTABLE_LDO(LDO2, 0x54);
+TWL6032_ADJUSTABLE_LDO(LDO4, 0x58);
+TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c);
+TWL6032_ADJUSTABLE_LDO(LDO5, 0x68);
+TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c);
+TWL6032_ADJUSTABLE_LDO(LDO7, 0x74);
+TWL6032_ADJUSTABLE_LDO(LDO6, 0x60);
+TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64);
+TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70);
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0);
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0);
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
@@ -687,14 +668,9 @@ static int twlreg_probe(struct platform_device *pdev)
struct regulator_init_data *initdata;
struct regulation_constraints *c;
struct regulator_dev *rdev;
- const struct of_device_id *match;
struct regulator_config config = { };
- match = of_match_device(twl_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- template = match->data;
+ template = of_device_get_match_data(&pdev->dev);
if (!template)
return -ENODEV;
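
Note: the twl6030 LDO change replaces the hand-written twl6030ldo_list_voltage() with regulator_list_voltage_linear_range(); selectors 25..30 are simply absent from twl6030ldo_linear_range, so the core reports them as invalid just as the old switch statement did. What the three ranges encode, as a sketch (helper name is illustrative):

/* sel 0      -> 0 uV (off)
 * sel 1..24  -> 1.0 V + 0.1 V * (sel - 1)
 * sel 25..30 -> listed in no range -> -EINVAL
 * sel 31     -> 2.75 V
 */
static int twl6030_ldo_sel_to_uV(unsigned int sel)
{
	if (sel == 0)
		return 0;
	if (sel <= 24)
		return 1000000 + 100000 * (sel - 1);
	if (sel == 31)
		return 2750000;
	return -EINVAL;
}
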
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index abf22acbd13e..9026d5a3e964 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -32,7 +32,7 @@ struct uniphier_regulator_priv {
const struct uniphier_regulator_soc_data *data;
};
-static struct regulator_ops uniphier_regulator_ops = {
+static const struct regulator_ops uniphier_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -87,8 +87,10 @@ static int uniphier_regulator_probe(struct platform_device *pdev)
}
regmap = devm_regmap_init_mmio(dev, base, priv->data->regconf);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto out_rst_assert;
+ }
config.dev = dev;
config.driver_data = priv;
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 5a5bc4bb08d2..12b422373580 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -205,33 +205,10 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
* BUCKV specifics
*/
-static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector <= 0x8)
- return 600000;
- if (selector <= WM831X_BUCKV_MAX_SELECTOR)
- return 600000 + ((selector - 0x8) * 12500);
- return -EINVAL;
-}
-
-static int wm831x_buckv_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- u16 vsel;
-
- if (min_uV < 600000)
- vsel = 0;
- else if (min_uV <= 1800000)
- vsel = DIV_ROUND_UP(min_uV - 600000, 12500) + 8;
- else
- return -EINVAL;
-
- if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV)
- return -EINVAL;
-
- return vsel;
-}
+static const struct regulator_linear_range wm831x_buckv_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x7, 0),
+ REGULATOR_LINEAR_RANGE(600000, 0x8, 0x68, 12500),
+};
static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
{
@@ -309,7 +286,7 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
int vsel;
- vsel = wm831x_buckv_map_voltage(rdev, uV, uV);
+ vsel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (vsel < 0)
return vsel;
@@ -327,52 +304,18 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
}
/* Current limit options */
-static u16 wm831x_dcdc_ilim[] = {
- 125, 250, 375, 500, 625, 750, 875, 1000
+static const unsigned int wm831x_dcdc_ilim[] = {
+ 125000, 250000, 375000, 500000, 625000, 750000, 875000, 1000000
};
-static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
- u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2;
- int i;
-
- for (i = ARRAY_SIZE(wm831x_dcdc_ilim) - 1; i >= 0; i--) {
- if ((min_uA <= wm831x_dcdc_ilim[i]) &&
- (wm831x_dcdc_ilim[i] <= max_uA))
- return wm831x_set_bits(wm831x, reg,
- WM831X_DC1_HC_THR_MASK,
- i << WM831X_DC1_HC_THR_SHIFT);
- }
-
- return -EINVAL;
-}
-
-static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
- u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2;
- int val;
-
- val = wm831x_reg_read(wm831x, reg);
- if (val < 0)
- return val;
-
- val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT;
- return wm831x_dcdc_ilim[val];
-}
-
static const struct regulator_ops wm831x_buckv_ops = {
.set_voltage_sel = wm831x_buckv_set_voltage_sel,
.get_voltage_sel = wm831x_buckv_get_voltage_sel,
- .list_voltage = wm831x_buckv_list_voltage,
- .map_voltage = wm831x_buckv_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
- .set_current_limit = wm831x_buckv_set_current_limit,
- .get_current_limit = wm831x_buckv_get_current_limit,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -492,10 +435,16 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
dcdc->desc.id = id;
dcdc->desc.type = REGULATOR_VOLTAGE;
dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1;
+ dcdc->desc.linear_ranges = wm831x_buckv_ranges;
+ dcdc->desc.n_linear_ranges = ARRAY_SIZE(wm831x_buckv_ranges);
dcdc->desc.ops = &wm831x_buckv_ops;
dcdc->desc.owner = THIS_MODULE;
dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
dcdc->desc.enable_mask = 1 << id;
+ dcdc->desc.csel_reg = dcdc->base + WM831X_DCDC_CONTROL_2;
+ dcdc->desc.csel_mask = WM831X_DC1_HC_THR_MASK;
+ dcdc->desc.n_current_limits = ARRAY_SIZE(wm831x_dcdc_ilim);
+ dcdc->desc.curr_table = wm831x_dcdc_ilim;
ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
if (ret < 0) {
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a3c20e3a8b7c..3337b1e80412 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
unsigned long mem_addr, mem_len;
- int retval = -ENODEV;
+ int retval;
retval = pci_enable_device(pdev);
if (retval) {
@@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
@@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
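
Note: this hunk and the SCSI hunks that follow (3w-sas, aic94xx, bfad, csiostor, hisi_sas, hptiop, lpfc) all rework the same construct: instead of boolean-combining two dma_set_mask_and_coherent() calls in one condition, the 64-bit mask is tried first and the 32-bit mask only on failure, keeping the return code so the failure can be reported or propagated cleanly. The shared shape, as a sketch (helper name is illustrative):

static int my_set_dma_mask(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "no suitable DMA mask available\n");
		return -ENODEV;
	}

	return 0;
}
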
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index cd096104bcec..dda6fa857709 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1573,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
@@ -1805,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 07efcb9b5b94..bbdae67774f0 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto Err_remove;
- err = -ENODEV;
- if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = -ENODEV;
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 42a0caf6740d..88880a66a189 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad)
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
- int rc = -ENODEV;
+ int rc = -ENODEV;
if (pci_enable_device(pdev)) {
printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
@@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ if (rc) {
+ rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
goto out_release_region;
}
@@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
{
struct bfad_s *bfad = pci_get_drvdata(pdev);
u8 byte;
+ int rc;
dev_printk(KERN_ERR, &pdev->dev,
"bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
@@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
+ rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
+ DMA_BIT_MASK(32));
+ if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cf629380a981..616b25bf7941 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rv) {
+ rv = -ENODEV;
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index eed7fc5b3389..bc17fa0d8375 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct Scsi_Host *shost;
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
+ int error;
shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
if (!shost) {
@@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index c92b3822c408..e0570fd8466e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_disable_device;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
- rc = -EIO;
+ rc = -ENODEV;
goto err_out_regions;
}
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 3eedfd4f8f57..251c084a6ff0 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
dma_addr_t start_phy;
void *start_virt;
u32 offset, i, req_size;
+ int rc;
dprintk("hptiop_probe(%p)\n", pcidev);
@@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
/* Enable 64bit DMA if possible */
iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
- if (dma_set_mask(&pcidev->dev,
- DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
- dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask(&pcidev->dev,
+ DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
+ if (rc)
+ rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
+
+ if (rc) {
printk(KERN_ERR "hptiop: fail to set dma_mask\n");
goto disable_pci_device;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b8d325ce8754..120fc520f27a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
return -ENODATA;
+ spin_lock_bh(&conn->session->back_lock);
+ if (conn->task == NULL) {
+ spin_unlock_bh(&conn->session->back_lock);
+ return -ENODATA;
+ }
__iscsi_get_task(task);
+ spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 17eb4185f29d..f21c93bbb35c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev(
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
goto out_free;
+ rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bede11e16349..e1129260ed18 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
unsigned long bar0map_len, bar2map_len;
int i, hbq_count;
void *ptr;
- int error = -ENODEV;
+ int error;
if (!pdev)
- return error;
+ return -ENODEV;
/* Set the device DMA mask size */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
return error;
+ error = -ENODEV;
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
@@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
uint32_t if_type;
if (!pdev)
- return error;
+ return -ENODEV;
/* Set the device DMA mask size */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
return error;
/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d65ac584eba..a6828391d6b3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
set_host_byte(cmd, DID_OK);
return BLK_STS_TARGET;
case DID_NEXUS_FAILURE:
+ set_host_byte(cmd, DID_OK);
return BLK_STS_NEXUS;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
@@ -2597,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- WARN_ON_ONCE(!sdev->quiesced_by);
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
if (sdev->sdev_state == SDEV_QUIESCE)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index fff86940388b..a340af797a85 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
return -EOPNOTSUPP;
/*
- * Get a reply buffer for the number of requested zones plus a header.
- * For ATA, buffers must be aligned to 512B.
+ * Get a reply buffer for the number of requested zones plus a header,
+ * without exceeding the device maximum command size. For ATA disks,
+ * buffers must be aligned to 512B.
*/
- buflen = roundup((nrz + 1) * 64, 512);
+ buflen = min(queue_max_hw_sectors(disk->queue) << 9,
+ roundup((nrz + 1) * 64, 512));
buf = kmalloc(buflen, gfp_mask);
if (!buf)
return -ENOMEM;
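As a worked example of the new cap (numbers illustrative, not from the patch): a request for 128 zones needs (128 + 1) * 64 = 8256 bytes, rounded up to 8704; a device limited to 8 sectors (4096 bytes) per command then bounds the allocation at 4096:

	unsigned int nrz = 128;                           /* requested zones (illustrative) */
	unsigned int cap = 8 << 9;                        /* queue_max_hw_sectors() == 8 -> 4096 bytes */
	unsigned int want = roundup((nrz + 1) * 64, 512); /* 8256 -> 8704 bytes */
	unsigned int buflen = min(cap, want);             /* capped at 4096 bytes */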
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 9f89cb134549..f761655e2a36 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -63,7 +63,7 @@ config SPI_ALTERA
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
- depends on ATH79 && GPIOLIB
+ depends on ATH79 || COMPILE_TEST
select SPI_BITBANG
help
This enables support for the SPI controller present on the
@@ -268,6 +268,27 @@ config SPI_FSL_LPSPI
help
This enables Freescale i.MX LPSPI controllers in master mode.
+config SPI_FSL_QUADSPI
+ tristate "Freescale QSPI controller"
+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ Up to four flash chips can be connected on two buses with two
+ chipselects each.
+ This controller does not support generic SPI messages. It only
+ supports the high-level SPI memory interface.
+
+config SPI_NXP_FLEXSPI
+ tristate "NXP Flex SPI controller"
+ depends on ARCH_LAYERSCAPE || HAS_IOMEM
+ help
+ This enables support for the Flex SPI controller in master mode.
+ Up to four slave devices can be connected on two buses with two
+ chipselects each.
+ This controller does not support generic SPI messages and only
+ supports the high-level SPI memory interface.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GPIOLIB || COMPILE_TEST
@@ -296,8 +317,7 @@ config SPI_IMX
depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
help
- This enables using the Freescale i.MX SPI controllers in master
- mode.
+ This enables support for the Freescale i.MX SPI controllers.
config SPI_JCORE
tristate "J-Core SPI Master"
@@ -372,7 +392,7 @@ config SPI_FSL_DSPI
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
- mode. VF610 platform uses the controller.
+ mode. VF610, LS1021A and ColdFire platforms use the controller.
config SPI_FSL_ESPI
tristate "Freescale eSPI controller"
@@ -631,6 +651,12 @@ config SPI_SH_HSPI
help
SPI driver for SuperH HSPI blocks.
+config SPI_SIFIVE
+ tristate "SiFive SPI controller"
+ depends on HAS_IOMEM
+ help
+ This exposes the SPI controller IP from SiFive.
+
config SPI_SIRF
tristate "CSR SiRFprimaII SPI controller"
depends on SIRF_DMA
@@ -665,7 +691,7 @@ config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
help
- SPI driver for STMicroelectonics STM32 SoCs.
+ SPI driver for STMicroelectronics STM32 SoCs.
STM32 SPI controller supports DMA and PIO modes. When DMA
is not available, the driver automatically falls back to
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f29627040dfb..d8fc03c9faa2 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
+obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
+obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o
obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index ddc712410812..fffc21cd5f79 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Atmel QSPI Controller
*
@@ -7,31 +8,19 @@
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
* Author: Piotr Bugalski <bugalski.piotr@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
-#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
-#include <linux/of.h>
-
#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/spi/spi-mem.h>
/* QSPI register offsets */
@@ -47,7 +36,9 @@
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+#define QSPI_RICR 0x003C /* Read Instruction Code Register */
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
@@ -100,7 +91,7 @@
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
-/* Bitfields in QSPI_ICR (Instruction Code Register) */
+/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
@@ -125,14 +116,12 @@
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
-#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
-#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
-#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
-#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
-#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13)
+#define QSPI_IFR_TFRTYP_MEM BIT(12)
+#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
@@ -148,24 +137,31 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+struct atmel_qspi_caps {
+ bool has_qspick;
+ bool has_ricr;
+};
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
- struct clk *clk;
+ struct clk *pclk;
+ struct clk *qspick;
struct platform_device *pdev;
+ const struct atmel_qspi_caps *caps;
u32 pending;
+ u32 mr;
struct completion cmd_completion;
};
-struct qspi_mode {
+struct atmel_qspi_mode {
u8 cmd_buswidth;
u8 addr_buswidth;
u8 data_buswidth;
u32 config;
};
-static const struct qspi_mode sama5d2_qspi_modes[] = {
+static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
@@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
-/* Register access functions */
-static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
-{
- return readl_relaxed(aq->regs + reg);
-}
-
-static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
-{
- writel_relaxed(value, aq->regs + reg);
-}
-
-static inline bool is_compatible(const struct spi_mem_op *op,
- const struct qspi_mode *mode)
+static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
+ const struct atmel_qspi_mode *mode)
{
if (op->cmd.buswidth != mode->cmd_buswidth)
return false;
@@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op,
return true;
}
-static int find_mode(const struct spi_mem_op *op)
+static int atmel_qspi_find_mode(const struct spi_mem_op *op)
{
u32 i;
- for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++)
- if (is_compatible(op, &sama5d2_qspi_modes[i]))
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
return i;
- return -1;
+ return -ENOTSUPP;
}
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- if (find_mode(op) < 0)
+ if (atmel_qspi_find_mode(op) < 0)
return false;
/* special case not supported by hardware */
@@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
return true;
}
-static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
{
- struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
- int mode;
+ u32 iar, icr, ifr;
u32 dummy_cycles = 0;
- u32 iar, icr, ifr, sr;
- int err = 0;
+ int mode;
iar = 0;
icr = QSPI_ICR_INST(op->cmd.opcode);
ifr = QSPI_IFR_INSTEN;
- qspi_writel(aq, QSPI_MR, QSPI_MR_SMM);
-
- mode = find_mode(op);
+ mode = atmel_qspi_find_mode(op);
if (mode < 0)
- return -ENOTSUPP;
-
- ifr |= sama5d2_qspi_modes[mode].config;
+ return mode;
+ ifr |= atmel_qspi_modes[mode].config;
if (op->dummy.buswidth && op->dummy.nbytes)
dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+ /*
+ * The controller allows 24- and 32-bit addressing, while NAND flash
+ * requires 16-bit addresses. Handling 8-bit addresses is done using
+ * the option field. For 16-bit addresses, the workaround depends
+ * on the number of requested dummy cycles. If there are 8 or more dummy
+ * cycles, the address is shifted and sent with the first dummy byte.
+ * Otherwise the opcode is disabled and the first byte of the address
+ * contains the command opcode (this works only if the opcode and address
+ * use the same buswidth). The limitation is when a 16-bit address is
+ * used without enough dummy cycles and the opcode uses a different
+ * buswidth than the address.
+ */
if (op->addr.buswidth) {
switch (op->addr.nbytes) {
case 0:
@@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
}
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
/* Set number of dummy cycles */
if (dummy_cycles)
ifr |= QSPI_IFR_NBDUM(dummy_cycles);
@@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->data.nbytes)
ifr |= QSPI_IFR_DATAEN;
- if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
- ifr |= QSPI_IFR_TFRTYP_TRSFR_READ;
- else
- ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE;
+ /*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+ if (aq->mr != QSPI_MR_SMM) {
+ writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
+ }
/* Clear pending interrupts */
- (void)qspi_readl(aq, QSPI_SR);
+ (void)readl_relaxed(aq->regs + QSPI_SR);
+
+ if (aq->caps->has_ricr) {
+ if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ ifr |= QSPI_IFR_APBTFRTYP_READ;
- /* Set QSPI Instruction Frame registers */
- qspi_writel(aq, QSPI_IAR, iar);
- qspi_writel(aq, QSPI_ICR, icr);
- qspi_writel(aq, QSPI_IFR, ifr);
+ /* Set QSPI Instruction Frame registers */
+ writel_relaxed(iar, aq->regs + QSPI_IAR);
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ writel_relaxed(icr, aq->regs + QSPI_RICR);
+ else
+ writel_relaxed(icr, aq->regs + QSPI_WICR);
+ writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ } else {
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
+
+ /* Set QSPI Instruction Frame registers */
+ writel_relaxed(iar, aq->regs + QSPI_IAR);
+ writel_relaxed(icr, aq->regs + QSPI_ICR);
+ writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
+ u32 sr, offset;
+ int err;
+
+ err = atmel_qspi_set_cfg(aq, op, &offset);
+ if (err)
+ return err;
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)qspi_readl(aq, QSPI_IFR);
+ (void)readl_relaxed(aq->regs + QSPI_IFR);
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
- _memcpy_fromio(op->data.buf.in,
- aq->mem + iar, op->data.nbytes);
+ _memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
else
- _memcpy_toio(aq->mem + iar,
- op->data.buf.out, op->data.nbytes);
+ _memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
/* Release the chip-select */
- qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+ writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR);
}
/* Poll INSTRuction End status */
- sr = qspi_readl(aq, QSPI_SR);
+ sr = readl_relaxed(aq->regs + QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
return err;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
+ writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER);
if (!wait_for_completion_timeout(&aq->cmd_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
- qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
+ writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR);
return err;
}
@@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- src_rate = clk_get_rate(aq->clk);
+ src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
@@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
scbr--;
scr = QSPI_SCR_SCBR(scbr);
- qspi_writel(aq, QSPI_SCR, scr);
+ writel_relaxed(scr, aq->regs + QSPI_SCR);
return 0;
}
@@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi)
static int atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
- qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+ writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
/* Enable the QSPI controller */
- qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
+ writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
{
- struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
+ struct atmel_qspi *aq = dev_id;
u32 status, mask, pending;
- status = qspi_readl(aq, QSPI_SR);
- mask = qspi_readl(aq, QSPI_IMR);
+ status = readl_relaxed(aq->regs + QSPI_SR);
+ mask = readl_relaxed(aq->regs + QSPI_IMR);
pending = status & mask;
if (!pending)
@@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev)
}
/* Get the peripheral clock */
- aq->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(aq->clk)) {
+ aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(aq->pclk))
+ aq->pclk = devm_clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(aq->pclk)) {
dev_err(&pdev->dev, "missing peripheral clock\n");
- err = PTR_ERR(aq->clk);
+ err = PTR_ERR(aq->pclk);
goto exit;
}
/* Enable the peripheral clock */
- err = clk_prepare_enable(aq->clk);
+ err = clk_prepare_enable(aq->pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
goto exit;
}
+ aq->caps = of_device_get_match_data(&pdev->dev);
+ if (!aq->caps) {
+ dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (aq->caps->has_qspick) {
+ /* Get the QSPI system clock */
+ aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+ if (IS_ERR(aq->qspick)) {
+ dev_err(&pdev->dev, "missing system clock\n");
+ err = PTR_ERR(aq->qspick);
+ goto disable_pclk;
+ }
+
+ /* Enable the QSPI system clock */
+ err = clk_prepare_enable(aq->qspick);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to enable the QSPI system clock\n");
+ goto disable_pclk;
+ }
+ }
+
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "missing IRQ\n");
err = irq;
- goto disable_clk;
+ goto disable_qspick;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
- goto disable_clk;
+ goto disable_qspick;
err = atmel_qspi_init(aq);
if (err)
- goto disable_clk;
+ goto disable_qspick;
err = spi_register_controller(ctrl);
if (err)
- goto disable_clk;
+ goto disable_qspick;
return 0;
-disable_clk:
- clk_disable_unprepare(aq->clk);
+disable_qspick:
+ clk_disable_unprepare(aq->qspick);
+disable_pclk:
+ clk_disable_unprepare(aq->pclk);
exit:
spi_controller_put(ctrl);
@@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
spi_unregister_controller(ctrl);
- qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
- clk_disable_unprepare(aq->clk);
+ writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR);
+ clk_disable_unprepare(aq->qspick);
+ clk_disable_unprepare(aq->pclk);
return 0;
}
@@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
struct atmel_qspi *aq = dev_get_drvdata(dev);
- clk_disable_unprepare(aq->clk);
+ clk_disable_unprepare(aq->qspick);
+ clk_disable_unprepare(aq->pclk);
return 0;
}
@@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
struct atmel_qspi *aq = dev_get_drvdata(dev);
- clk_prepare_enable(aq->clk);
+ clk_prepare_enable(aq->pclk);
+ clk_prepare_enable(aq->qspick);
return atmel_qspi_init(aq);
}
@@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
atmel_qspi_resume);
+static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
+
+static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
+ .has_qspick = true,
+ .has_ricr = true,
+};
+
static const struct of_device_id atmel_qspi_dt_ids[] = {
- { .compatible = "atmel,sama5d2-qspi" },
+ {
+ .compatible = "atmel,sama5d2-qspi",
+ .data = &atmel_sama5d2_qspi_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-qspi",
+ .data = &atmel_sam9x60_qspi_caps,
+ },
{ /* sentinel */ }
};
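To see how the renamed atmel_qspi_modes[] table gets matched, here is an illustrative spi-mem operation (not taken from the patch) that atmel_qspi_find_mode() would resolve to the { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT } entry:

	u8 buf[256];
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),		/* fast read, quad output */
			   SPI_MEM_OP_ADDR(3, 0, 1),		/* 3-byte address on one line */
			   SPI_MEM_OP_DUMMY(1, 1),		/* one dummy byte = 8 cycles */
			   SPI_MEM_OP_DATA_IN(sizeof(buf), buf, 4));

Unsupported combinations now propagate -ENOTSUPP from atmel_qspi_find_mode() instead of the bare -1 used before.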
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 3f6b657394de..847f354ebef1 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -21,18 +21,26 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
-
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ath79_spi_platform.h>
+#include <linux/platform_data/spi-ath79.h>
#define DRV_NAME "ath79-spi"
#define ATH79_SPI_RRW_DELAY_FACTOR 12000
#define MHZ (1000 * 1000)
+#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
+#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
+#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
+#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
+
+#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
+
+#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
+#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
+#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
+
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
@@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
+ u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
- if (is_active) {
- /* set initial clock polarity */
- if (spi->mode & SPI_CPOL)
- sp->ioc_base |= AR71XX_SPI_IOC_CLK;
- else
- sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
-
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
- }
-
- if (gpio_is_valid(spi->cs_gpio)) {
- /* SPI is normally active-low */
- gpio_set_value_cansleep(spi->cs_gpio, cs_high);
- } else {
- u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
-
- if (cs_high)
- sp->ioc_base |= cs_bit;
- else
- sp->ioc_base &= ~cs_bit;
-
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
- }
+ if (cs_high)
+ sp->ioc_base |= cs_bit;
+ else
+ sp->ioc_base &= ~cs_bit;
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
static void ath79_spi_enable(struct ath79_spi *sp)
@@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp)
sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
+ /* clear clk and mosi in the base state */
+ sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK);
+
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
}
@@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp)
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
-static int ath79_spi_setup_cs(struct spi_device *spi)
-{
- struct ath79_spi *sp = ath79_spidev_to_sp(spi);
- int status;
-
- status = 0;
- if (gpio_is_valid(spi->cs_gpio)) {
- unsigned long flags;
-
- flags = GPIOF_DIR_OUT;
- if (spi->mode & SPI_CS_HIGH)
- flags |= GPIOF_INIT_LOW;
- else
- flags |= GPIOF_INIT_HIGH;
-
- status = gpio_request_one(spi->cs_gpio, flags,
- dev_name(&spi->dev));
- } else {
- u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
-
- if (spi->mode & SPI_CS_HIGH)
- sp->ioc_base &= ~cs_bit;
- else
- sp->ioc_base |= cs_bit;
-
- ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
- }
-
- return status;
-}
-
-static void ath79_spi_cleanup_cs(struct spi_device *spi)
-{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-}
-
-static int ath79_spi_setup(struct spi_device *spi)
-{
- int status = 0;
-
- if (!spi->controller_state) {
- status = ath79_spi_setup_cs(spi);
- if (status)
- return status;
- }
-
- status = spi_bitbang_setup(spi);
- if (status && !spi->controller_state)
- ath79_spi_cleanup_cs(spi);
-
- return status;
-}
-
-static void ath79_spi_cleanup(struct spi_device *spi)
-{
- ath79_spi_cleanup_cs(spi);
- spi_bitbang_cleanup(spi);
-}
-
static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
u32 word, u8 bits, unsigned flags)
{
@@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
+ master->use_gpio_descriptors = true;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->setup = ath79_spi_setup;
- master->cleanup = ath79_spi_cleanup;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
if (pdata) {
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
- sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
sp->bitbang.flags = SPI_CS_HIGH;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 74fddcd3282b..4954f0ab1606 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -23,8 +23,7 @@
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
@@ -312,7 +311,7 @@ struct atmel_spi {
/* Controller-specific per-slave state */
struct atmel_spi_device {
- unsigned int npcs_pin;
+ struct gpio_desc *npcs_pin;
u32 csr;
};
@@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
- unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
if (atmel_spi_is_v2(as)) {
@@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
if (as->use_cs_gpios)
- gpio_set_value(asd->npcs_pin, active);
+ gpiod_set_value(asd->npcs_pin, 1);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
if (as->use_cs_gpios && spi->chip_select != 0)
- gpio_set_value(asd->npcs_pin, active);
+ gpiod_set_value(asd->npcs_pin, 1);
spi_writel(as, MR, mr);
}
- dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
- asd->npcs_pin, active ? " (high)" : "",
- mr);
+ dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
- unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
/* only deactivate *this* device; sometimes transfers to
@@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
spi_writel(as, MR, mr);
}
- dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
- asd->npcs_pin, active ? " (low)" : "",
- mr);
+ dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
if (!as->use_cs_gpios)
spi_writel(as, CR, SPI_BIT(LASTXFER));
else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
- gpio_set_value(asd->npcs_pin, !active);
+ gpiod_set_value(asd->npcs_pin, 0);
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi)
struct atmel_spi_device *asd;
u32 csr;
unsigned int bits = spi->bits_per_word;
- unsigned int npcs_pin;
as = spi_master_get_devdata(spi->master);
@@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi)
csr |= SPI_BIT(CSAAT);
/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
- *
- * DLYBCT would add delays between words, slowing down transfers.
- * It could potentially be useful to cope with DMA bottlenecks, but
- * in those cases it's probably best to just use a lower bitrate.
*/
csr |= SPI_BF(DLYBS, 0);
- csr |= SPI_BF(DLYBCT, 0);
-
- /* chipselect must have been muxed as GPIO (e.g. in board setup) */
- npcs_pin = (unsigned long)spi->controller_data;
- if (!as->use_cs_gpios)
- npcs_pin = spi->chip_select;
- else if (gpio_is_valid(spi->cs_gpio))
- npcs_pin = spi->cs_gpio;
+ /* DLYBCT adds delays between words. This is useful for slow devices
+ * that need a bit of time to set up the next transfer.
+ */
+ csr |= SPI_BF(DLYBCT,
+ (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5);
asd = spi->controller_state;
if (!asd) {
@@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
- if (as->use_cs_gpios)
- gpio_direction_output(npcs_pin,
- !(spi->mode & SPI_CS_HIGH));
+ /*
+ * If use_cs_gpios is true this means that we have "cs-gpios"
+ * defined in the device tree node so we should have
+ * gotten the GPIO lines from the device tree inside the
+ * SPI core. Warn if this is not the case but continue since
+ * CS GPIOs are after all optional.
+ */
+ if (as->use_cs_gpios) {
+ if (!spi->cs_gpiod) {
+ dev_err(&spi->dev,
+ "host claims to use CS GPIOs but no CS found in DT by the SPI core\n");
+ }
+ asd->npcs_pin = spi->cs_gpiod;
+ }
- asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
}
@@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.has_pdc_support = version < 0x212;
}
-/*-------------------------------------------------------------------------*/
-static int atmel_spi_gpio_cs(struct platform_device *pdev)
-{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct device_node *np = master->dev.of_node;
- int i;
- int ret = 0;
- int nb = 0;
-
- if (!as->use_cs_gpios)
- return 0;
-
- if (!np)
- return 0;
-
- nb = of_gpio_named_count(np, "cs-gpios");
- for (i = 0; i < nb; i++) {
- int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
- "cs-gpios", i);
-
- if (cs_gpio == -EPROBE_DEFER)
- return cs_gpio;
-
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request(&pdev->dev, cs_gpio,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
static void atmel_spi_init(struct atmel_spi *as)
{
spi_writel(as, CR, SPI_BIT(SWRST));
@@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_free;
/* the spi->mode bits understood by this driver: */
+ master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->dev.of_node = pdev->dev.of_node;
@@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
atmel_get_caps(as);
+ /*
+ * If there are chip selects in the device tree, those will be
+ * discovered by the SPI core when registering the SPI master
+ * and assigned to each SPI device.
+ */
as->use_cs_gpios = true;
if (atmel_spi_is_v2(as) &&
pdev->dev.of_node &&
@@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
}
- ret = atmel_spi_gpio_cs(pdev);
- if (ret)
- goto out_unmap_regs;
-
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
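As a rough worked example of the new DLYBCT computation (clock value illustrative): with an 84 MHz peripheral clock and word_delay_usecs = 1, the expression evaluates to (84000000 / 1000000 * 1) >> 5 = 84 / 32 = 2, i.e. DLYBCT = 2, the >> 5 reflecting that the field counts delay in units of 32 peripheral-clock cycles.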
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 671e374e1b01..f7e054848ca5 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
}
bs->clk = devm_clk_get(&pdev->dev, NULL);
- if ((!bs->clk) || (IS_ERR(bs->clk))) {
+ if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_master_put;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index f29176000b8d..dd9a8c54a693 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi)
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
- /* NOTE we _need_ to call chipselect() early, ideally with adapter
- * setup, unless the hardware defaults cooperate to avoid confusion
- * between normal (active low) and inverted chipselects.
- */
-
- /* deselect chip (low or high) */
- mutex_lock(&bitbang->lock);
- if (!bitbang->busy) {
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(cs->nsecs);
- }
- mutex_unlock(&bitbang->lock);
-
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 7c88f74f7f47..43d0e79842ac 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -128,10 +128,6 @@ struct cdns_spi {
u32 is_decoded_cs;
};
-struct cdns_spi_device_data {
- bool gpio_requested;
-};
-
/* Macros for the SPI controller read/write */
static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
{
@@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
/**
* cdns_spi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
- * @is_high: Select(0) or deselect (1) the chip select line
+ * @enable: Select (1) or deselect (0) the chip select line
*/
-static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
+static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
{
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
- if (is_high) {
+ if (!enable) {
/* Deselect the slave */
ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
@@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}
-static int cdns_spi_setup(struct spi_device *spi)
-{
-
- int ret = -EINVAL;
- struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
-
- /* this is a pin managed by the controller, leave it alone */
- if (spi->cs_gpio == -ENOENT)
- return 0;
-
- /* this seems to be the first time we're here */
- if (!cdns_spi_data) {
- cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL);
- if (!cdns_spi_data)
- return -ENOMEM;
- cdns_spi_data->gpio_requested = false;
- spi_set_ctldata(spi, cdns_spi_data);
- }
-
- /* if we haven't done so, grab the gpio */
- if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request_one(spi->cs_gpio,
- (spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (ret)
- dev_err(&spi->dev, "can't request chipselect gpio %d\n",
- spi->cs_gpio);
- else
- cdns_spi_data->gpio_requested = true;
- } else {
- if (gpio_is_valid(spi->cs_gpio)) {
- int mode = ((spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
-
- ret = gpio_direction_output(spi->cs_gpio, mode);
- if (ret)
- dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
- spi->cs_gpio, ret);
- }
- }
-
- return ret;
-}
-
-static void cdns_spi_cleanup(struct spi_device *spi)
-{
- struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
-
- if (cdns_spi_data) {
- if (cdns_spi_data->gpio_requested)
- gpio_free(spi->cs_gpio);
- kfree(cdns_spi_data);
- spi_set_ctldata(spi, NULL);
- }
-
-}
-
/**
* cdns_spi_probe - Probe method for the SPI driver
* @pdev: Pointer to the platform_device structure
@@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev)
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
+ master->use_gpio_descriptors = true;
master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
master->prepare_message = cdns_prepare_message;
master->transfer_one = cdns_transfer_one;
master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
master->set_cs = cdns_spi_chipselect;
- master->setup = cdns_spi_setup;
- master->cleanup = cdns_spi_cleanup;
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 18193df2eba8..8c03c409fc07 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -36,25 +36,6 @@ struct spi_clps711x_data {
int len;
};
-static int spi_clps711x_setup(struct spi_device *spi)
-{
- if (!spi->controller_state) {
- int ret;
-
- ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio,
- dev_name(&spi->master->dev));
- if (ret)
- return ret;
-
- spi->controller_state = spi;
- }
-
- /* We are expect that SPI-device is not selected */
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
-
- return 0;
-}
-
static int spi_clps711x_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->use_gpio_descriptors = true;
master->bus_num = -1;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->dev.of_node = pdev->dev.of_node;
- master->setup = spi_clps711x_setup;
master->prepare_message = spi_clps711x_prepare_message;
master->transfer_one = spi_clps711x_transfer_one;
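The clps711x conversion above, like the davinci and dw conversions below, leans on the same core facility: once master->use_gpio_descriptors is set, the SPI core parses "cs-gpios" itself and hands each device a spi->cs_gpiod, with active-low polarity already encoded in the descriptor. A minimal sketch of what driver-side chip-select handling reduces to (illustrative helper, not from the patch):

#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

static void example_assert_cs(struct spi_device *spi)
{
	/* gpiod_set_value() takes logical values: 1 asserts, 0 deasserts */
	if (spi->cs_gpiod)
		gpiod_set_value(spi->cs_gpiod, 1);
}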
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 56adec83f8fc..eb246ebcfa3a 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
@@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
* Board specific chip select logic decides the polarity and cs
* line for the controller
*/
- if (spi->cs_gpio >= 0) {
+ if (spi->cs_gpiod) {
+ /*
+ * FIXME: is this code ever executed? This host does not
+ * set SPI_MASTER_GPIO_SS so this chipselect callback should
+ * not get called from the SPI core when we are using
+ * GPIOs for chip select.
+ */
if (value == BITBANG_CS_ACTIVE)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH);
+ gpiod_set_value(spi->cs_gpiod, 1);
else
- gpio_set_value(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
+ gpiod_set_value(spi->cs_gpiod, 0);
} else {
if (value == BITBANG_CS_ACTIVE) {
if (!(spi->mode & SPI_CS_WORD))
@@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi)
*/
static int davinci_spi_setup(struct spi_device *spi)
{
- int retval = 0;
struct davinci_spi *dspi;
- struct spi_master *master = spi->master;
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
dspi = spi_master_get_devdata(spi->master);
if (!(spi->mode & SPI_NO_CS)) {
- if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
- retval = gpio_direction_output(
- spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (np && spi->cs_gpiod)
internal_cs = false;
- }
-
- if (retval) {
- dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
- spi->cs_gpio, retval);
- return retval;
- }
- if (internal_cs) {
+ if (internal_cs)
set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
- }
}
if (spi->mode & SPI_READY)
@@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (ret)
goto free_master;
+ master->use_gpio_descriptors = true;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
@@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (dspi->version == SPI_VERSION_2)
dspi->bitbang.flags |= SPI_READY;
- if (pdev->dev.of_node) {
- int i;
-
- for (i = 0; i < pdata->num_chipselect; i++) {
- int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
- "cs-gpios", i);
-
- if (cs_gpio == -EPROBE_DEFER) {
- ret = cs_gpio;
- goto free_clk;
- }
-
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request(&pdev->dev, cs_gpio,
- dev_name(&pdev->dev));
- if (ret)
- goto free_clk;
- }
- }
- }
-
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
ret = davinci_spi_request_dma(dspi);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index d0dd7814e997..4bd59a93d988 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -18,7 +18,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
#include <linux/property.h>
@@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->num_cs = num_cs;
- if (pdev->dev.of_node) {
- int i;
-
- for (i = 0; i < dws->num_cs; i++) {
- int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
- "cs-gpios", i);
-
- if (cs_gpio == -EPROBE_DEFER) {
- ret = cs_gpio;
- goto out;
- }
-
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request(&pdev->dev, cs_gpio,
- dev_name(&pdev->dev));
- if (ret)
- goto out;
- }
- }
- }
-
init_func = device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func(pdev, dwsmmio);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 2e822a56576a..ac81025f86ab 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include "spi-dw.h"
@@ -54,41 +53,41 @@ static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
if (!buf)
return 0;
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"%s registers:\n", dev_name(&dws->master->dev));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
- len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
- /* Chip select logic is inverted from spi_set_cs() */
if (chip && chip->cs_control)
- chip->cs_control(!enable);
+ chip->cs_control(enable);
- if (!enable)
+ if (enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
@@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
- | (spi->mode << SPI_MODE_OFFSET)
+ | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
+ (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
| (chip->tmode << SPI_TMOD_OFFSET);
/*
@@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
- int ret;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi)
chip->tmode = SPI_TMOD_TR;
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
goto err_free_master;
}
+ master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->bus_num = dws->bus_num;
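The snprintf() to scnprintf() switch above is more than cosmetic: snprintf() returns the length that would have been written had the buffer been large enough, so accumulating its return value can push the running offset past SPI_REGS_BUFSIZE, while scnprintf() returns the number of bytes actually stored and keeps the offset valid. The accumulation pattern from the hunk, for reference:

	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			 "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));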
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 5e10dc5c93a5..53335ccc98f6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -67,7 +67,7 @@
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
-#define SPI_SR_CLEAR 0xdaad0000
+#define SPI_SR_CLEAR 0x9aaf0000
#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
@@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
+ if (spi_controller_is_slave(dspi->master))
+ return data;
+
if (dspi->len > 0)
cmd |= SPI_PUSHR_CMD_CONT;
return cmd << 16 | data;
@@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
+ if (spi_controller_is_slave(dspi->master)) {
+ wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
+ return 0;
+ }
+
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
@@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi)
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
- | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
- | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
- | SPI_CTAR_PCSSCK(pcssck)
- | SPI_CTAR_CSSCK(cssck)
- | SPI_CTAR_PASC(pasc)
- | SPI_CTAR_ASC(asc)
- | SPI_CTAR_PBR(pbr)
- | SPI_CTAR_BR(br);
+ | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0);
+
+ if (!spi_controller_is_slave(dspi->master)) {
+ chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode &
+ SPI_LSB_FIRST ? 1 : 0)
+ | SPI_CTAR_PCSSCK(pcssck)
+ | SPI_CTAR_CSSCK(cssck)
+ | SPI_CTAR_PASC(pasc)
+ | SPI_CTAR_ASC(asc)
+ | SPI_CTAR_PBR(pbr)
+ | SPI_CTAR_BR(br);
+ }
spi_set_ctldata(spi, chip);
@@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
static void dspi_init(struct fsl_dspi *dspi)
{
- regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
- (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
+ unsigned int mcr = SPI_MCR_PCSIS |
+ (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0);
+
+ if (!spi_controller_is_slave(dspi->master))
+ mcr |= SPI_MCR_MASTER;
+
+ regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
if (dspi->devtype_data->xspi_mode)
regmap_write(dspi->regmap, SPI_CTARE(0),
@@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev)
}
master->bus_num = bus_num;
+ if (of_property_read_bool(np, "spi-slave"))
+ master->slave = true;
+
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 08dcc3c22e88..391863914043 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -48,10 +48,13 @@
#define CR_RTF BIT(8)
#define CR_RST BIT(1)
#define CR_MEN BIT(0)
+#define SR_MBF BIT(24)
#define SR_TCF BIT(10)
+#define SR_FCF BIT(9)
#define SR_RDF BIT(1)
#define SR_TDF BIT(0)
#define IER_TCIE BIT(10)
+#define IER_FCIE BIT(9)
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
#define CFGR1_PCSCFG BIT(27)
@@ -59,6 +62,7 @@
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_MASTER BIT(0)
+#define FSR_RXCOUNT (BIT(16)|BIT(17)|BIT(18))
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
#define TCR_CPHA BIT(30)
@@ -161,28 +165,10 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
return 0;
}
-static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi)
-{
- u32 txcnt;
- unsigned long orig_jiffies = jiffies;
-
- do {
- txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
-
- if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
- dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n");
- return -ETIMEDOUT;
- }
- cond_resched();
-
- } while (txcnt);
-
- return 0;
-}
-
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
u8 txfifo_cnt;
+ u32 temp;
txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
@@ -193,9 +179,15 @@ static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
txfifo_cnt++;
}
- if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize))
- writel(0, fsl_lpspi->base + IMX7ULP_TDR);
- else
+ if (txfifo_cnt < fsl_lpspi->txfifosize) {
+ if (!fsl_lpspi->is_slave) {
+ temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+ temp &= ~TCR_CONTC;
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+ }
+
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ } else
fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
}
@@ -276,10 +268,6 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
u32 temp;
int ret;
- temp = CR_RST;
- writel(temp, fsl_lpspi->base + IMX7ULP_CR);
- writel(0, fsl_lpspi->base + IMX7ULP_CR);
-
if (!fsl_lpspi->is_slave) {
ret = fsl_lpspi_set_bitrate(fsl_lpspi);
if (ret)
@@ -370,6 +358,24 @@ static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
return 0;
}
+static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
+{
+ u32 temp;
+
+ /* Disable all interrupt */
+ fsl_lpspi_intctrl(fsl_lpspi, 0);
+
+ /* W1C for all flags in SR */
+ temp = 0x3F << 8;
+ writel(temp, fsl_lpspi->base + IMX7ULP_SR);
+
+ /* Clear FIFO and disable module */
+ temp = CR_RRF | CR_RTF;
+ writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+ return 0;
+}
+
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
@@ -391,11 +397,7 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
if (ret)
return ret;
- ret = fsl_lpspi_txfifo_empty(fsl_lpspi);
- if (ret)
- return ret;
-
- fsl_lpspi_read_rx_fifo(fsl_lpspi);
+ fsl_lpspi_reset(fsl_lpspi);
return 0;
}
@@ -408,7 +410,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
struct spi_device *spi = msg->spi;
struct spi_transfer *xfer;
bool is_first_xfer = true;
- u32 temp;
int ret = 0;
msg->status = 0;
@@ -428,13 +429,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
}
complete:
- if (!fsl_lpspi->is_slave) {
- /* de-assert SS, then finalize current message */
- temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
- temp &= ~TCR_CONTC;
- writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
- }
-
msg->status = ret;
spi_finalize_current_message(controller);
@@ -443,20 +437,30 @@ complete:
static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
{
+ u32 temp_SR, temp_IER;
struct fsl_lpspi_data *fsl_lpspi = dev_id;
- u32 temp;
+ temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER);
fsl_lpspi_intctrl(fsl_lpspi, 0);
- temp = readl(fsl_lpspi->base + IMX7ULP_SR);
+ temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR);
fsl_lpspi_read_rx_fifo(fsl_lpspi);
- if (temp & SR_TDF) {
+ if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) {
fsl_lpspi_write_tx_fifo(fsl_lpspi);
+ return IRQ_HANDLED;
+ }
- if (!fsl_lpspi->remain)
- complete(&fsl_lpspi->xfer_done);
+ if (temp_SR & SR_MBF ||
+ readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_RXCOUNT) {
+ writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ return IRQ_HANDLED;
+ }
+ if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) {
+ writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
+ complete(&fsl_lpspi->xfer_done);
return IRQ_HANDLED;
}
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
new file mode 100644
index 000000000000..6a713f78a62e
--- /dev/null
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -0,0 +1,966 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2018 Bootlin
+ * Copyright (C) 2018 exceet electronics GmbH
+ * Copyright (C) 2018 Kontron Electronics GmbH
+ *
+ * Transition to SPI MEM interface:
+ * Authors:
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ * Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
+ * Suresh Gupta <suresh.gupta@nxp.com>
+ *
+ * Based on the original fsl-quadspi.c spi-nor driver:
+ * Author: Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * The driver only uses one single LUT entry, that is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (15).
+ */
+#define SEQID_LUT 15
+
+/* Registers used by the driver */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
+#define QUADSPI_MCR_MDIS_MASK BIT(14)
+#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
+#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
+#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
+#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
+#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
+#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
+#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
+#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
+#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
+#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
+#define QUADSPI_SMPR_HSENA_MASK BIT(0)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
+#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
+
+#define QUADSPI_TBDR 0x154
+
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_MASK BIT(1)
+#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK BIT(0)
+
+#define QUADSPI_SPTRCLR 0x16c
+#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
+#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK BIT(0)
+#define QUADSPI_LCKER_UNLOCK BIT(1)
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE BIT(0)
+
+#define QUADSPI_LUT_BASE 0x310
+#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define QUADSPI_LUT_REG(idx) \
+ (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
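+
+/*
+ * For example, each LUT sequence occupies four 32-bit registers, so with
+ * SEQID_LUT = 15 the offset is 15 * 4 * 4 = 0xf0 and QUADSPI_LUT_REG(0..3)
+ * resolves to 0x400..0x40c.
+ */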
+
+/* Instruction set for the LUT register */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for LUT register.
+ *
+ * The pad stands for the number of IO lines [0:3].
+ * For example, the quad read needs four IO lines,
+ * so you should use LUT_PAD(4).
+ */
+#define LUT_PAD(x) (fls(x) - 1)
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
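+
+/*
+ * For example, placing a command opcode 0x6b sent on one IO line into
+ * slot 0, LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x6b), evaluates to
+ * ((1 << 10) | (0 << 8) | 0x6b) << 0 = 0x046b, i.e. INSTR0 = CMD,
+ * PAD0 = one IO line and OPRND0 = 0x6b in the low half-word.
+ */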
+
+/* Controller needs driver to swap endianness */
+#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
+
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
+
+/*
+ * TKT253890, the controller needs the driver to fill the txfifo with
+ * at least 16 bytes to trigger a data transfer, even though the extra
+ * data won't be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 BIT(2)
+
+/* TKT245618, the controller cannot wake up from wait mode */
+#define QUADSPI_QUIRK_TKT245618 BIT(3)
+
+/*
+ * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
+ * internally. No need to add it when setting SFXXAD and SFAR registers
+ */
+#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+
+struct fsl_qspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct fsl_qspi_devtype_data vybrid_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6sx_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx7d_data = {
+ .rxfifo = SZ_512,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data ls1021a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = 0,
+ .little_endian = false,
+};
+
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
+ .little_endian = true,
+};
+
+struct fsl_qspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ const struct fsl_qspi_devtype_data *devtype_data;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+ int selected;
+};
+
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
+}
+
+static inline int needs_amba_base_offset(struct fsl_qspi *q)
+{
+ return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
+}
+
+/*
+ * An IC bug makes it necessary to rearrange the 32-bit data.
+ * Later chips, such as IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return needs_swap_endian(q) ? __swab32(a) : a;
+}
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The QSPI controller's endianness is independent of
+ * the CPU core's endianness. Even when the CPU core is
+ * little-endian, the QSPI controller may be wired as either
+ * big-endian or little-endian.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ return ioread32(addr);
+
+ return ioread32be(addr);
+}
+
+static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
+{
+ struct fsl_qspi *q = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = qspi_readl(q, q->iobase + QUADSPI_FR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_FR);
+
+ if (reg & QUADSPI_FR_TFF_MASK)
+ complete(&q->c);
+
+ dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
+ return IRQ_HANDLED;
+}
+
+static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool fsl_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+	 * The number of instructions needed for the op needs to fit
+	 * into a single LUT entry (CMD and STOP already occupy two of
+	 * its eight instruction slots).
+ */
+ if (op->addr.nbytes +
+ (op->dummy.nbytes ? 1:0) +
+ (op->data.nbytes ? 1:0) > 6)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.nbytes &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > q->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > q->devtype_data->txfifo)
+ return false;
+
+ return true;
+}
+
+static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /*
+ * For some unknown reason, using LUT_ADDR doesn't work in some
+ * cases (at least with only one byte long addresses), so
+ * let's use LUT_MODE to write the address bytes one by one
+ */
+ for (i = 0; i < op->addr.nbytes; i++) {
+ u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
+ LUT_PAD(op->addr.buswidth),
+ addrbyte);
+ lutidx++;
+ }
+
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ LUT_PAD(op->dummy.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_FSL_READ : LUT_FSL_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
+
+ /* lock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
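+
+/*
+ * For illustration: a read op with a three byte address, one dummy byte
+ * and a data phase is laid out above as slot 0 = CMD, slots 1-3 = MODE
+ * (one per address byte), slot 4 = DUMMY, slot 5 = FSL_READ and
+ * slot 6 = STOP, using seven of the eight instruction slots provided by
+ * the four registers of the LUT entry.
+ */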
+
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+ int ret;
+
+ ret = clk_prepare_enable(q->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(q->clk);
+ if (ret) {
+ clk_disable_unprepare(q->clk_en);
+ return ret;
+ }
+
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+ return 0;
+}
+
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+ if (needs_wakeup_wait_mode(q))
+ pm_qos_remove_request(&q->pm_qos_req);
+
+ clk_disable_unprepare(q->clk);
+ clk_disable_unprepare(q->clk_en);
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing, or if we
+ * read from flash with a different offset into the page buffer, we need to
+ * invalidate the AHB buffer. If we do not do so, we may read out the wrong
+ * data. The spec tells us to reset the AHB domain and Serial Flash domain at
+ * the same time.
+ */
+static void fsl_qspi_invalidate(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+
+ /*
+	 * The minimum delay is 1 AHB + 2 SFCK clocks.
+	 * A delay of 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+}
+
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
+{
+ unsigned long rate = spi->max_speed_hz;
+ int ret;
+
+ if (q->selected == spi->chip_select)
+ return;
+
+ if (needs_4x_clock(q))
+ rate *= 4;
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ ret = clk_set_rate(q->clk, rate);
+ if (ret)
+ return;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return;
+
+ q->selected = spi->chip_select;
+
+ fsl_qspi_invalidate(q);
+}
+
+static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in,
+ q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
+ op->data.nbytes);
+}
+
+static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ memcpy(&val, op->data.buf.out + i, 4);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (i < op->data.nbytes) {
+ memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (needs_fill_txfifo(q)) {
+ for (i = op->data.nbytes; i < 16; i += 4)
+ qspi_writel(q, 0, base + QUADSPI_TBDR);
+ }
+}
+
+static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u8 *buf = op->data.buf.in;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, 4);
+ }
+
+ if (i < op->data.nbytes) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, op->data.nbytes - i);
+ }
+}
+
+static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int err = 0;
+
+ init_completion(&q->c);
+
+ /*
+ * Always start the sequence at the same index since we update
+	 * the LUT at each exec_op() call. Also specify the DATA
+	 * length, since it has not been specified in the LUT.
+ */
+ qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
+ base + QUADSPI_IPCR);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ fsl_qspi_read_rxfifo(q, op);
+
+ return err;
+}
+
+static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
+ u32 mask, u32 delay_us, u32 timeout_us)
+{
+ u32 reg;
+
+ if (!q->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
+ timeout_us);
+}
+
+static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ void __iomem *base = q->iobase;
+ u32 addr_offset = 0;
+ int err = 0;
+
+ mutex_lock(&q->lock);
+
+	/* wait for the controller to become ready */
+ fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
+ QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
+
+ fsl_qspi_select_mem(q, mem->spi);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ qspi_writel(q,
+ q->selected * q->devtype_data->ahb_buf_size + addr_offset,
+ base + QUADSPI_SFAR);
+
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
+ QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
+ base + QUADSPI_MCR);
+
+ qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
+ base + QUADSPI_SPTRCLR);
+
+ fsl_qspi_prepare_lut(q, op);
+
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ fsl_qspi_read_ahb(q, op);
+ } else {
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
+ QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ fsl_qspi_fill_txfifo(q, op);
+
+ err = fsl_qspi_do_op(q, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ fsl_qspi_invalidate(q);
+
+ mutex_unlock(&q->lock);
+
+ return err;
+}
+
+static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > q->devtype_data->txfifo)
+ op->data.nbytes = q->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > q->devtype_data->ahb_buf_size)
+ op->data.nbytes = q->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ return 0;
+}
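+
+/*
+ * For example, with a 128 byte RX FIFO and a 1 KiB AHB buffer, a
+ * 2000 byte read is clamped to 1024 bytes, while a 130 byte read
+ * (larger than rxfifo - 4) is rounded down to 128 bytes so that the
+ * AHB read path always sees an 8-byte aligned length.
+ */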
+
+static int fsl_qspi_default_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg, addr_offset = 0;
+ int ret;
+
+	/* disable and unprepare the clocks so no glitch reaches the controller */
+ fsl_qspi_clk_disable_unprep(q);
+
+	/* Set the default frequency; we will change it later if necessary. */
+ ret = clk_set_rate(q->clk, 66000000);
+ if (ret)
+ return ret;
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
+ /* Disable the module */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+	/* We only use buffer 3 for AHB reads */
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF1IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF2IND);
+
+ qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
+ q->iobase + QUADSPI_BFGENCR);
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+ QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
+ base + QUADSPI_BUF3CR);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ /*
+ * In HW there can be a maximum of four chips on two buses with
+ * two chip selects on each bus. We use four chip selects in SW
+ * to differentiate between the four chips.
+ * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
+ * SFB2AD accordingly.
+ */
+ qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
+ base + QUADSPI_SFA1AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
+ base + QUADSPI_SFA2AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
+ base + QUADSPI_SFB1AD);
+ qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
+ base + QUADSPI_SFB2AD);
+
+ q->selected = -1;
+
+ /* Enable the module */
+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+
+ /* clear all interrupt status */
+ qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
+
+ /* enable the interrupt */
+ qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+
+ return 0;
+}
+
+static const char *fsl_qspi_get_name(struct spi_mem *mem)
+{
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = &mem->spi->dev;
+ const char *name;
+
+ /*
+ * In order to keep mtdparts compatible with the old MTD driver at
+ * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
+ * platform_device of the controller.
+ */
+ if (of_get_available_child_count(q->dev->of_node) == 1)
+ return dev_name(q->dev);
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", dev_name(q->dev),
+ mem->spi->chip_select);
+
+ if (!name) {
+ dev_err(dev, "failed to get memory for custom flash name\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return name;
+}
+
+static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
+ .adjust_op_size = fsl_qspi_adjust_op_size,
+ .supports_op = fsl_qspi_supports_op,
+ .exec_op = fsl_qspi_exec_op,
+ .get_name = fsl_qspi_get_name,
+};
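+
+/*
+ * Note: the spi-mem core calls ->supports_op() to validate each operation
+ * and ->exec_op() to run it, while upper layers (e.g. spi-nor) use
+ * spi_mem_adjust_op_size(), and therefore ->adjust_op_size(), to split
+ * transfers that exceed the controller limits checked above.
+ */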
+
+static int fsl_qspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct fsl_qspi *q;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ q = spi_controller_get_devdata(ctlr);
+ q->dev = dev;
+ q->devtype_data = of_device_get_match_data(dev);
+ if (!q->devtype_data) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, q);
+
+ /* find the resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
+ q->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->iobase)) {
+ ret = PTR_ERR(q->iobase);
+ goto err_put_ctrl;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "QuadSPI-memory");
+ q->ahb_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(q->ahb_addr)) {
+ ret = PTR_ERR(q->ahb_addr);
+ goto err_put_ctrl;
+ }
+
+ q->memmap_phy = res->start;
+
+ /* find the clocks */
+ q->clk_en = devm_clk_get(dev, "qspi_en");
+ if (IS_ERR(q->clk_en)) {
+ ret = PTR_ERR(q->clk_en);
+ goto err_put_ctrl;
+ }
+
+ q->clk = devm_clk_get(dev, "qspi");
+ if (IS_ERR(q->clk)) {
+ ret = PTR_ERR(q->clk);
+ goto err_put_ctrl;
+ }
+
+ ret = fsl_qspi_clk_prep_enable(q);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ fsl_qspi_irq_handler, 0, pdev->name, q);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ mutex_init(&q->lock);
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 4;
+ ctlr->mem_ops = &fsl_qspi_mem_ops;
+
+ fsl_qspi_default_setup(q);
+
+ ctlr->dev.of_node = np;
+
+ ret = spi_register_controller(ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&q->lock);
+
+err_disable_clk:
+ fsl_qspi_clk_disable_unprep(q);
+
+err_put_ctrl:
+ spi_controller_put(ctlr);
+
+ dev_err(dev, "Freescale QuadSPI probe failed\n");
+ return ret;
+}
+
+static int fsl_qspi_remove(struct platform_device *pdev)
+{
+ struct fsl_qspi *q = platform_get_drvdata(pdev);
+
+ /* disable the hardware */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ mutex_destroy(&q->lock);
+
+ return 0;
+}
+
+static int fsl_qspi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int fsl_qspi_resume(struct device *dev)
+{
+ struct fsl_qspi *q = dev_get_drvdata(dev);
+
+ fsl_qspi_default_setup(q);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
+ { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
+
+static const struct dev_pm_ops fsl_qspi_pm_ops = {
+ .suspend = fsl_qspi_suspend,
+ .resume = fsl_qspi_resume,
+};
+
+static struct platform_driver fsl_qspi_driver = {
+ .driver = {
+ .name = "fsl-quadspi",
+ .of_match_table = fsl_qspi_dt_ids,
+ .pm = &fsl_qspi_pm_ops,
+ },
+ .probe = fsl_qspi_probe,
+ .remove = fsl_qspi_remove,
+};
+module_platform_driver(fsl_qspi_driver);
+
+MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
+MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
+MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
+MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index fdb7cb88fb56..5f0b0d5bfef4 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -89,9 +89,6 @@ struct spi_geni_master {
int irq;
};
-static void handle_fifo_timeout(struct spi_master *spi,
- struct spi_message *msg);
-
static int get_spi_clk_cfg(unsigned int speed_hz,
struct spi_geni_master *mas,
unsigned int *clk_idx,
@@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
+static void handle_fifo_timeout(struct spi_master *spi,
+ struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ unsigned long time_left, flags;
+ struct geni_se *se = &mas->se;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ reinit_completion(&mas->xfer_done);
+ mas->cur_mcmd = CMD_CANCEL;
+ geni_se_cancel_m_cmd(se);
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (time_left)
+ return;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ reinit_completion(&mas->xfer_done);
+ geni_se_abort_m_cmd(se);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+}
+
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
@@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi,
struct geni_se *se = &mas->se;
geni_se_select_mode(se, GENI_SE_FIFO);
- reinit_completion(&mas->xfer_done);
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
-static void handle_fifo_timeout(struct spi_master *spi,
- struct spi_message *msg)
-{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
- unsigned long time_left, flags;
- struct geni_se *se = &mas->se;
-
- spin_lock_irqsave(&mas->lock, flags);
- reinit_completion(&mas->xfer_done);
- mas->cur_mcmd = CMD_CANCEL;
- geni_se_cancel_m_cmd(se);
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
- spin_unlock_irqrestore(&mas->lock, flags);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
- if (time_left)
- return;
-
- spin_lock_irqsave(&mas->lock, flags);
- reinit_completion(&mas->xfer_done);
- geni_se_abort_m_cmd(se);
- spin_unlock_irqrestore(&mas->lock, flags);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
- if (!time_left)
- dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
-}
-
static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_device *slv,
struct spi_transfer *xfer)
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index a4aee26028cd..53b35c56a557 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -428,7 +428,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL;
+ master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
+ SPI_CS_HIGH;
master->flags = master_flags;
master->bus_num = pdev->id;
/* The master needs to think there is a chipselect even if not connected */
@@ -455,7 +456,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
}
spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
- spi_gpio->bitbang.flags = SPI_CS_HIGH;
status = spi_bitbang_start(&spi_gpio->bitbang);
if (status)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 5217a5628be2..a4d8d19ecff9 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -537,7 +537,6 @@ EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
/**
* spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
* @desc: the direct mapping descriptor to destroy
- * @info: direct mapping information
*
* This function destroys a direct mapping descriptor previously created by
* spi_mem_dirmap_create().
@@ -548,9 +547,80 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
ctlr->mem_ops->dirmap_destroy(desc);
+
+ kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
+static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
+{
+ struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
+
+ spi_mem_dirmap_destroy(desc);
+}
+
+/**
+ * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
+ * it to a device
+ * @dev: device the dirmap desc will be attached to
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * devm_ variant of the spi_mem_dirmap_create() function. See
+ * spi_mem_dirmap_create() for more details.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info)
+{
+ struct spi_mem_dirmap_desc **ptr, *desc;
+
+ ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = spi_mem_dirmap_create(mem, info);
+ if (IS_ERR(desc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = desc;
+ devres_add(dev, ptr);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
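+
+/*
+ * Usage sketch (illustrative only; names such as "flash_size" are
+ * placeholders): a flash driver would typically create a read mapping in
+ * its probe path and let devres destroy it on unbind:
+ *
+ *	struct spi_mem_dirmap_info info = {
+ *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
+ *				      SPI_MEM_OP_ADDR(3, 0, 1),
+ *				      SPI_MEM_OP_DUMMY(1, 1),
+ *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
+ *		.offset = 0,
+ *		.length = flash_size,
+ *	};
+ *	struct spi_mem_dirmap_desc *desc;
+ *
+ *	desc = devm_spi_mem_dirmap_create(dev, mem, &info);
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ */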
+
+static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
+{
+ struct spi_mem_dirmap_desc **ptr = res;
+
+ if (WARN_ON(!ptr || !*ptr))
+ return 0;
+
+ return *ptr == data;
+}
+
+/**
+ * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
+ * to a device
+ * @dev: device the dirmap desc is attached to
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * devm_ variant of the spi_mem_dirmap_destroy() function. See
+ * spi_mem_dirmap_destroy() for more details.
+ */
+void devm_spi_mem_dirmap_destroy(struct device *dev,
+ struct spi_mem_dirmap_desc *desc)
+{
+ devres_release(dev, devm_spi_mem_dirmap_release,
+ devm_spi_mem_dirmap_match, desc);
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
+
/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 6ac95a2a21ce..7bf53cfc25d6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
+#include <trace/events/spi.h>
#define DRIVER_NAME "mxs-spi"
@@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
+ trace_spi_transfer_start(m, t);
+
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
break;
@@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
flag);
}
+ trace_spi_transfer_stop(m, t);
+
if (status) {
stmp_reset_block(ssp->base);
break;
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index e1dca79b9090..734a2b956959 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -465,7 +465,8 @@ out_master_put:
static int npcm_pspi_remove(struct platform_device *pdev)
{
- struct npcm_pspi *priv = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
npcm_pspi_reset_hw(priv);
clk_disable_unprepare(priv->clk);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
new file mode 100644
index 000000000000..8894f98cc99c
--- /dev/null
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * NXP FlexSPI (FSPI) controller driver.
+ *
+ * Copyright 2019 NXP.
+ *
+ * FlexSPI is a flexible SPI host controller which supports two SPI
+ * channels and up to 4 external devices. Each channel supports
+ * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional
+ * data lines).
+ *
+ * The FlexSPI controller is driven by the LUT (Look-up Table) registers.
+ * The LUT registers are a look-up table for sequences of instructions.
+ * A valid sequence consists of four LUT registers.
+ * A maximum of 32 LUT sequences can be programmed simultaneously.
+ *
+ * LUTs are created at run time based on the commands passed from the
+ * spi-mem framework, so only a single LUT index is used.
+ *
+ * Flash read/write access is software triggered via the IP bus, while
+ * memory mapped read access goes through the AHB bus.
+ *
+ * Based on SPI MEM interface and spi-fsl-qspi.c driver.
+ *
+ * Author:
+ * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * The driver only uses one LUT entry, which is updated on each call
+ * of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (31).
+ */
+#define SEQID_LUT 31
+
+/* Registers used by the driver */
+#define FSPI_MCR0 0x00
+#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
+#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR0_LEARN_EN BIT(15)
+#define FSPI_MCR0_SCRFRUN_EN BIT(14)
+#define FSPI_MCR0_OCTCOMB_EN BIT(13)
+#define FSPI_MCR0_DOZE_EN BIT(12)
+#define FSPI_MCR0_HSEN BIT(11)
+#define FSPI_MCR0_SERCLKDIV BIT(8)
+#define FSPI_MCR0_ATDF_EN BIT(7)
+#define FSPI_MCR0_ARDF_EN BIT(6)
+#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4)
+#define FSPI_MCR0_END_CFG(x) ((x) << 2)
+#define FSPI_MCR0_MDIS BIT(1)
+#define FSPI_MCR0_SWRST BIT(0)
+
+#define FSPI_MCR1 0x04
+#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR1_AHB_TIMEOUT(x) (x)
+
+#define FSPI_MCR2 0x08
+#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24)
+#define FSPI_MCR2_SAMEDEVICEEN BIT(15)
+#define FSPI_MCR2_CLRLRPHS BIT(14)
+#define FSPI_MCR2_ABRDATSZ BIT(8)
+#define FSPI_MCR2_ABRLEARN BIT(7)
+#define FSPI_MCR2_ABR_READ BIT(6)
+#define FSPI_MCR2_ABRWRITE BIT(5)
+#define FSPI_MCR2_ABRDUMMY BIT(4)
+#define FSPI_MCR2_ABR_MODE BIT(3)
+#define FSPI_MCR2_ABRCADDR BIT(2)
+#define FSPI_MCR2_ABRRADDR BIT(1)
+#define FSPI_MCR2_ABR_CMD BIT(0)
+
+#define FSPI_AHBCR 0x0c
+#define FSPI_AHBCR_RDADDROPT BIT(6)
+#define FSPI_AHBCR_PREF_EN BIT(5)
+#define FSPI_AHBCR_BUFF_EN BIT(4)
+#define FSPI_AHBCR_CACH_EN BIT(3)
+#define FSPI_AHBCR_CLRTXBUF BIT(2)
+#define FSPI_AHBCR_CLRRXBUF BIT(1)
+#define FSPI_AHBCR_PAR_EN BIT(0)
+
+#define FSPI_INTEN 0x10
+#define FSPI_INTEN_SCLKSBWR BIT(9)
+#define FSPI_INTEN_SCLKSBRD BIT(8)
+#define FSPI_INTEN_DATALRNFL BIT(7)
+#define FSPI_INTEN_IPTXWE BIT(6)
+#define FSPI_INTEN_IPRXWA BIT(5)
+#define FSPI_INTEN_AHBCMDERR BIT(4)
+#define FSPI_INTEN_IPCMDERR BIT(3)
+#define FSPI_INTEN_AHBCMDGE BIT(2)
+#define FSPI_INTEN_IPCMDGE BIT(1)
+#define FSPI_INTEN_IPCMDDONE BIT(0)
+
+#define FSPI_INTR 0x14
+#define FSPI_INTR_SCLKSBWR BIT(9)
+#define FSPI_INTR_SCLKSBRD BIT(8)
+#define FSPI_INTR_DATALRNFL BIT(7)
+#define FSPI_INTR_IPTXWE BIT(6)
+#define FSPI_INTR_IPRXWA BIT(5)
+#define FSPI_INTR_AHBCMDERR BIT(4)
+#define FSPI_INTR_IPCMDERR BIT(3)
+#define FSPI_INTR_AHBCMDGE BIT(2)
+#define FSPI_INTR_IPCMDGE BIT(1)
+#define FSPI_INTR_IPCMDDONE BIT(0)
+
+#define FSPI_LUTKEY 0x18
+#define FSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define FSPI_LCKCR 0x1C
+
+#define FSPI_LCKER_LOCK 0x1
+#define FSPI_LCKER_UNLOCK 0x2
+
+#define FSPI_BUFXCR_INVALID_MSTRID 0xE
+#define FSPI_AHBRX_BUF0CR0 0x20
+#define FSPI_AHBRX_BUF1CR0 0x24
+#define FSPI_AHBRX_BUF2CR0 0x28
+#define FSPI_AHBRX_BUF3CR0 0x2C
+#define FSPI_AHBRX_BUF4CR0 0x30
+#define FSPI_AHBRX_BUF5CR0 0x34
+#define FSPI_AHBRX_BUF6CR0 0x38
+#define FSPI_AHBRX_BUF7CR0 0x3C
+#define FSPI_AHBRXBUF0CR7_PREF BIT(31)
+
+#define FSPI_AHBRX_BUF0CR1 0x40
+#define FSPI_AHBRX_BUF1CR1 0x44
+#define FSPI_AHBRX_BUF2CR1 0x48
+#define FSPI_AHBRX_BUF3CR1 0x4C
+#define FSPI_AHBRX_BUF4CR1 0x50
+#define FSPI_AHBRX_BUF5CR1 0x54
+#define FSPI_AHBRX_BUF6CR1 0x58
+#define FSPI_AHBRX_BUF7CR1 0x5C
+
+#define FSPI_FLSHA1CR0 0x60
+#define FSPI_FLSHA2CR0 0x64
+#define FSPI_FLSHB1CR0 0x68
+#define FSPI_FLSHB2CR0 0x6C
+#define FSPI_FLSHXCR0_SZ_KB 10
+#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB)
+
+#define FSPI_FLSHA1CR1 0x70
+#define FSPI_FLSHA2CR1 0x74
+#define FSPI_FLSHB1CR1 0x78
+#define FSPI_FLSHB2CR1 0x7C
+#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16)
+#define FSPI_FLSHXCR1_CAS(x) ((x) << 11)
+#define FSPI_FLSHXCR1_WA BIT(10)
+#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5)
+#define FSPI_FLSHXCR1_TCSS(x) (x)
+
+#define FSPI_FLSHA1CR2 0x80
+#define FSPI_FLSHA2CR2 0x84
+#define FSPI_FLSHB1CR2 0x88
+#define FSPI_FLSHB2CR2 0x8C
+#define FSPI_FLSHXCR2_CLRINSP BIT(24)
+#define FSPI_FLSHXCR2_AWRWAIT BIT(16)
+#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
+#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
+#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
+#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
+
+#define FSPI_IPCR0 0xA0
+
+#define FSPI_IPCR1 0xA4
+#define FSPI_IPCR1_IPAREN BIT(31)
+#define FSPI_IPCR1_SEQNUM_SHIFT 24
+#define FSPI_IPCR1_SEQID_SHIFT 16
+#define FSPI_IPCR1_IDATSZ(x) (x)
+
+#define FSPI_IPCMD 0xB0
+#define FSPI_IPCMD_TRG BIT(0)
+
+#define FSPI_DLPR 0xB4
+
+#define FSPI_IPRXFCR 0xB8
+#define FSPI_IPRXFCR_CLR BIT(0)
+#define FSPI_IPRXFCR_DMA_EN BIT(1)
+#define FSPI_IPRXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_IPTXFCR 0xBC
+#define FSPI_IPTXFCR_CLR BIT(0)
+#define FSPI_IPTXFCR_DMA_EN BIT(1)
+#define FSPI_IPTXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_DLLACR 0xC0
+#define FSPI_DLLACR_OVRDEN BIT(8)
+
+#define FSPI_DLLBCR 0xC4
+#define FSPI_DLLBCR_OVRDEN BIT(8)
+
+#define FSPI_STS0 0xE0
+#define FSPI_STS0_DLPHB(x) ((x) << 8)
+#define FSPI_STS0_DLPHA(x) ((x) << 4)
+#define FSPI_STS0_CMD_SRC(x) ((x) << 2)
+#define FSPI_STS0_ARB_IDLE BIT(1)
+#define FSPI_STS0_SEQ_IDLE BIT(0)
+
+#define FSPI_STS1 0xE4
+#define FSPI_STS1_IP_ERRCD(x) ((x) << 24)
+#define FSPI_STS1_IP_ERRID(x) ((x) << 16)
+#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8)
+#define FSPI_STS1_AHB_ERRID(x) (x)
+
+#define FSPI_AHBSPNST 0xEC
+#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16)
+#define FSPI_AHBSPNST_BUFID(x) ((x) << 1)
+#define FSPI_AHBSPNST_ACTIVE BIT(0)
+
+#define FSPI_IPRXFSTS 0xF0
+#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16)
+#define FSPI_IPRXFSTS_FILL(x) (x)
+
+#define FSPI_IPTXFSTS 0xF4
+#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16)
+#define FSPI_IPTXFSTS_FILL(x) (x)
+
+#define FSPI_RFDR 0x100
+#define FSPI_TFDR 0x180
+
+#define FSPI_LUT_BASE 0x200
+#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define FSPI_LUT_REG(idx) \
+ (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
+
+/* register map end */
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0x00
+#define LUT_CMD 0x01
+#define LUT_ADDR 0x02
+#define LUT_CADDR_SDR 0x03
+#define LUT_MODE 0x04
+#define LUT_MODE2 0x05
+#define LUT_MODE4 0x06
+#define LUT_MODE8 0x07
+#define LUT_NXP_WRITE 0x08
+#define LUT_NXP_READ 0x09
+#define LUT_LEARN_SDR 0x0A
+#define LUT_DATSZ_SDR 0x0B
+#define LUT_DUMMY 0x0C
+#define LUT_DUMMY_RWDS_SDR 0x0D
+#define LUT_JMP_ON_CS 0x1F
+#define LUT_CMD_DDR 0x21
+#define LUT_ADDR_DDR 0x22
+#define LUT_CADDR_DDR 0x23
+#define LUT_MODE_DDR 0x24
+#define LUT_MODE2_DDR 0x25
+#define LUT_MODE4_DDR 0x26
+#define LUT_MODE8_DDR 0x27
+#define LUT_WRITE_DDR 0x28
+#define LUT_READ_DDR 0x29
+#define LUT_LEARN_DDR 0x2A
+#define LUT_DATSZ_DDR 0x2B
+#define LUT_DUMMY_DDR 0x2C
+#define LUT_DUMMY_RWDS_DDR 0x2D
+
+/*
+ * Calculate number of required PAD bits for LUT register.
+ *
+ * The pad stands for the number of IO lines [0:7].
+ * For example, the octal read needs eight IO lines,
+ * so you should use LUT_PAD(8). This macro
+ * returns 3, i.e. eight (2^3) IO lines are used for the read.
+ */
+#define LUT_PAD(x) (fls(x) - 1)
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define PAD_SHIFT 8
+#define INSTR_SHIFT 10
+#define OPRND_SHIFT 16
+
+/* Macros for constructing the LUT register. */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
+ (opr)) << (((idx) % 2) * OPRND_SHIFT))
+
+#define POLL_TOUT 5000
+#define NXP_FSPI_MAX_CHIPSELECT 4
+
+struct nxp_fspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct nxp_fspi_devtype_data lx2160a_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+struct nxp_fspi {
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ u32 memmap_phy_size;
+ struct clk *clk, *clk_en;
+ struct device *dev;
+ struct completion c;
+ const struct nxp_fspi_devtype_data *devtype_data;
+ struct mutex lock;
+ struct pm_qos_request pm_qos_req;
+ int selected;
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The FSPI controller's endianness is independent of
+ * the CPU core's endianness. Even when the CPU core is
+ * little-endian, the FSPI controller may be wired as either
+ * big-endian or little-endian.
+ */
+static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ return ioread32(addr);
+ else
+ return ioread32be(addr);
+}
+
+static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
+{
+ struct nxp_fspi *f = dev_id;
+ u32 reg;
+
+ /* clear interrupt */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR);
+
+ if (reg & FSPI_INTR_IPCMDDONE)
+ complete(&f->c);
+
+ return IRQ_HANDLED;
+}
+
+static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool nxp_fspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+ * The number of address bytes should be equal to or less than 4 bytes.
+ */
+ if (op->addr.nbytes > 4)
+ return false;
+
+ /*
+	 * If the requested address value is greater than the memory mapped
+	 * space assigned to the controller, return an error: the access would
+	 * not fit in the assigned address range.
+ */
+ if (op->addr.val >= f->memmap_phy_size)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.buswidth &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > f->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > f->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > f->devtype_data->txfifo)
+ return false;
+
+ return true;
+}
+
+/* Instead of busy looping, poll the register with readl_poll_timeout(). */
+static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base,
+ u32 mask, u32 delay_us,
+ u32 timeout_us, bool c)
+{
+ u32 reg;
+
+ if (!f->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ if (c)
+ return readl_poll_timeout(base, reg, (reg & mask),
+ delay_us, timeout_us);
+ else
+ return readl_poll_timeout(base, reg, !(reg & mask),
+ delay_us, timeout_us);
+}
+
+/*
+ * If the slave device content is changed by a write or erase, the AHB
+ * buffer needs to be invalidated. This is achieved by resetting the
+ * controller via the MCR0[SWRESET] bit.
+ */
+static inline void nxp_fspi_invalid(struct nxp_fspi *f)
+{
+ u32 reg;
+ int ret;
+
+ reg = fspi_readl(f, f->iobase + FSPI_MCR0);
+ fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0);
+
+	/* w1c register, wait until it clears */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+}
+
+static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ /* cmd */
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /* addr bytes */
+ if (op->addr.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
+ LUT_PAD(op->addr.buswidth),
+ op->addr.nbytes * 8);
+ lutidx++;
+ }
+
+ /* dummy bytes, if needed */
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ /*
+ * Due to FlexSPI controller limitation number of PAD for dummy
+	 * Due to a FlexSPI controller limitation, the number of PADs for the
+	 * dummy phase must be programmed equal to the data buswidth.
+ LUT_PAD(op->data.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ /* read/write data bytes */
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_NXP_READ : LUT_NXP_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ /* stop condition. */
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
+
+ dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
+ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);
+
+ /* lock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
+}
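+
+/*
+ * For illustration: a read op with a four byte address, dummy cycles and
+ * a data phase is laid out above as slot 0 = CMD, slot 1 = ADDR (operand
+ * 32, the address width in bits), slot 2 = DUMMY, slot 3 = NXP_READ and
+ * slot 4 = STOP, i.e. five of the eight instruction slots of the LUT
+ * sequence.
+ */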
+
+static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
+{
+ int ret;
+
+ ret = clk_prepare_enable(f->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(f->clk);
+ if (ret) {
+ clk_disable_unprepare(f->clk_en);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+{
+ clk_disable_unprepare(f->clk);
+ clk_disable_unprepare(f->clk_en);
+}
+
+/*
+ * In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0
+ * register and start base address of the slave device.
+ *
+ * (Higher address)
+ * -------- <-- FLSHB2CR0
+ * | B2 |
+ * | |
+ * B2 start address --> -------- <-- FLSHB1CR0
+ * | B1 |
+ * | |
+ * B1 start address --> -------- <-- FLSHA2CR0
+ * | A2 |
+ * | |
+ * A2 start address --> -------- <-- FLSHA1CR0
+ * | A1 |
+ * | |
+ * A1 start address --> -------- (Lower address)
+ *
+ *
+ * Start base address defines the starting address range for given CS and
+ * FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS.
+ *
+ * However, different targets have different numbers of chip selects: some
+ * targets only have one or two CS covering the controller's full memory
+ * mapped space.
+ * Thus, the implementation is kept independent of the size and number of
+ * the connected slave devices.
+ * Assign the controller's memory mapped space size as the size of the
+ * connected slave device.
+ * Mark FLSHxxCR0 as zero initially and then assign value only to the selected
+ * chip-select Flash configuration register.
+ *
+ * For example, to access CS2 (B1), the FLSHB1CR0 register is set to the
+ * memory mapped size of the controller.
+ * Value for rest of the CS FLSHxxCR0 register would be zero.
+ *
+ */
+static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
+{
+ unsigned long rate = spi->max_speed_hz;
+ int ret;
+ uint64_t size_kb;
+
+ /*
+	 * Return if the previously selected slave device is the same as the
+	 * currently requested slave device.
+ */
+ if (f->selected == spi->chip_select)
+ return;
+
+ /* Reset FLSHxxCR0 registers */
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0);
+
+	/* Assign the controller's memory mapped space, in KiB, as the flash size. */
+ size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size);
+
+ fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 +
+ 4 * spi->chip_select);
+
+ dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select);
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ ret = clk_set_rate(f->clk, rate);
+ if (ret)
+ return;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return;
+
+ f->selected = spi->chip_select;
+}
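+
+/*
+ * For example, with a 256 MiB "fspi_mmap" window the function above
+ * programs FSPI_FLSHXCR0_SZ(0x10000000) = 0x40000 (the size in KiB)
+ * into the FLSHxxCR0 register of the selected chip select only.
+ */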
+
+static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ u32 len = op->data.nbytes;
+
+ /* Read out the data directly from the AHB buffer. */
+ memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len);
+}
+
+static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ u8 *buf = (u8 *) op->data.buf.out;
+
+ /* clear the TX FIFO. */
+ fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR);
+
+ /*
+	 * The default watermark level is 8 bytes, hence a single write
+	 * request lets the controller write at most 8 bytes of data.
+ */
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) {
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR);
+ fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4);
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+
+ if (i < op->data.nbytes) {
+ u32 data = 0;
+ int j;
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+ memcpy(&data, buf + i + j, 4);
+ fspi_writel(f, data, base + FSPI_TFDR + j);
+ }
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+}
+
+static void nxp_fspi_read_rxfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ int len = op->data.nbytes;
+ u8 *buf = (u8 *) op->data.buf.in;
+
+ /*
+	 * The default watermark level is 8 bytes, hence a single read
+	 * request lets the controller read at most 8 bytes of data.
+ */
+ for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) {
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR);
+ *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+ }
+
+ if (i < len) {
+ u32 tmp;
+ int size, j;
+
+ buf = op->data.buf.in + i;
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ len = op->data.nbytes - i;
+ for (j = 0; j < op->data.nbytes - i; j += 4) {
+ tmp = fspi_readl(f, base + FSPI_RFDR + j);
+ size = min(len, 4);
+ memcpy(buf + j, &tmp, size);
+ len -= size;
+ }
+ }
+
+	/* invalidate the RXFIFO */
+ fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+}
+
+static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int seqnum = 0;
+ int err = 0;
+ u32 reg;
+
+ reg = fspi_readl(f, base + FSPI_IPRXFCR);
+	/* invalidate the RXFIFO first */
+ reg &= ~FSPI_IPRXFCR_DMA_EN;
+ reg = reg | FSPI_IPRXFCR_CLR;
+ fspi_writel(f, reg, base + FSPI_IPRXFCR);
+
+ init_completion(&f->c);
+
+ fspi_writel(f, op->addr.val, base + FSPI_IPCR0);
+ /*
+ * Always start the sequence at the same index since we update
+	 * the LUT at each exec_op() call. Also specify the DATA
+	 * length, since it has not been specified in the LUT.
+ */
+ fspi_writel(f, op->data.nbytes |
+ (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
+ (seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
+ base + FSPI_IPCR1);
+
+ /* Trigger the LUT now. */
+ fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD);
+
+ /* Wait for the interrupt. */
+ if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+	/* Read the data back from the IP RX FIFO if this was a data read. */
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ nxp_fspi_read_rxfifo(f, op);
+
+ return err;
+}
+
+static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ int err = 0;
+
+ mutex_lock(&f->lock);
+
+	/* Wait for the controller to become ready. */
+ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
+ FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
+ WARN_ON(err);
+
+ nxp_fspi_select_mem(f, mem->spi);
+
+ nxp_fspi_prepare_lut(f, op);
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ nxp_fspi_read_ahb(f, op);
+ } else {
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ nxp_fspi_fill_txfifo(f, op);
+
+ err = nxp_fspi_do_op(f, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ nxp_fspi_invalid(f);
+
+ mutex_unlock(&f->lock);
+
+ return err;
+}
+
+static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > f->devtype_data->txfifo)
+ op->data.nbytes = f->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > f->devtype_data->ahb_buf_size)
+ op->data.nbytes = f->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (f->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ return 0;
+}
+
+static int nxp_fspi_default_setup(struct nxp_fspi *f)
+{
+ void __iomem *base = f->iobase;
+ int ret, i;
+ u32 reg;
+
+	/* disable and unprepare the clocks so no glitch reaches the controller */
+ nxp_fspi_clk_disable_unprep(f);
+
+	/* Set the default frequency; we will change it later if necessary. */
+ ret = clk_set_rate(f->clk, 20000000);
+ if (ret)
+ return ret;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
+
+ /* Reset the module */
+	/* w1c register, wait until it clears */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+
+ /* Disable the module */
+ fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);
+
+ /* Reset the DLL register to default value */
+ fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
+ fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
+
+ /* enable module */
+ fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF),
+ base + FSPI_MCR0);
+
+ /*
+ * Disable same device enable bit and configure all slave devices
+ * independently.
+ */
+ reg = fspi_readl(f, f->iobase + FSPI_MCR2);
+ reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN);
+ fspi_writel(f, reg, base + FSPI_MCR2);
+
+	/* AHB configuration for access buffers 0~6; buffer 7 is set up below. */
+ for (i = 0; i < 7; i++)
+ fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
+
+ /*
+ * Set ADATSZ with the maximum AHB buffer size to improve the read
+ * performance.
+ */
+ fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 |
+ FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0);
+
+ /* prefetch and no start address alignment limitation */
+ fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
+ base + FSPI_AHBCR);
+
+ /* AHB Read - Set lut sequence ID for all CS. */
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
+
+ f->selected = -1;
+
+ /* enable the interrupt */
+ fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN);
+
+ return 0;
+}
+
+static const char *nxp_fspi_get_name(struct spi_mem *mem)
+{
+ struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
+ struct device *dev = &mem->spi->dev;
+ const char *name;
+
+	/* Set a custom name derived from the platform_device of the controller. */
+ if (of_get_available_child_count(f->dev->of_node) == 1)
+ return dev_name(f->dev);
+
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", dev_name(f->dev),
+ mem->spi->chip_select);
+
+ if (!name) {
+ dev_err(dev, "failed to get memory for custom flash name\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return name;
+}
+
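+/* Callbacks exported to the spi-mem core. */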
+static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
+ .adjust_op_size = nxp_fspi_adjust_op_size,
+ .supports_op = nxp_fspi_supports_op,
+ .exec_op = nxp_fspi_exec_op,
+ .get_name = nxp_fspi_get_name,
+};
+
+static int nxp_fspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct nxp_fspi *f;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
+ SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
+
+ f = spi_controller_get_devdata(ctlr);
+ f->dev = dev;
+ f->devtype_data = of_device_get_match_data(dev);
+ if (!f->devtype_data) {
+ ret = -ENODEV;
+ goto err_put_ctrl;
+ }
+
+ platform_set_drvdata(pdev, f);
+
+ /* find the resources - configuration register address space */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base");
+ f->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(f->iobase)) {
+ ret = PTR_ERR(f->iobase);
+ goto err_put_ctrl;
+ }
+
+ /* find the resources - controller memory mapped space */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
+ f->ahb_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(f->ahb_addr)) {
+ ret = PTR_ERR(f->ahb_addr);
+ goto err_put_ctrl;
+ }
+
+ /* Record the memory mapped start address and the mapped size. */
+ f->memmap_phy = res->start;
+ f->memmap_phy_size = resource_size(res);
+
+ /* find the clocks */
+ f->clk_en = devm_clk_get(dev, "fspi_en");
+ if (IS_ERR(f->clk_en)) {
+ ret = PTR_ERR(f->clk_en);
+ goto err_put_ctrl;
+ }
+
+ f->clk = devm_clk_get(dev, "fspi");
+ if (IS_ERR(f->clk)) {
+ ret = PTR_ERR(f->clk);
+ goto err_put_ctrl;
+ }
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
+
+ /* find the irq */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to get the irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = devm_request_irq(dev, ret,
+ nxp_fspi_irq_handler, 0, pdev->name, f);
+ if (ret) {
+ dev_err(dev, "failed to request irq: %d\n", ret);
+ goto err_disable_clk;
+ }
+
+ mutex_init(&f->lock);
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
+ ctlr->mem_ops = &nxp_fspi_mem_ops;
+
+ nxp_fspi_default_setup(f);
+
+ ctlr->dev.of_node = np;
+
+ ret = spi_register_controller(ctlr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ return 0;
+
+err_destroy_mutex:
+ mutex_destroy(&f->lock);
+
+err_disable_clk:
+ nxp_fspi_clk_disable_unprep(f);
+
+err_put_ctrl:
+ spi_controller_put(ctlr);
+
+ dev_err(dev, "NXP FSPI probe failed\n");
+ return ret;
+}
+
+static int nxp_fspi_remove(struct platform_device *pdev)
+{
+ struct nxp_fspi *f = platform_get_drvdata(pdev);
+
+ /* disable the hardware */
+ fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ mutex_destroy(&f->lock);
+
+ return 0;
+}
+
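+/*
+ * No controller state needs to be saved on suspend; resume simply re-runs
+ * the default setup to reprogram the registers.
+ */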
+static int nxp_fspi_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int nxp_fspi_resume(struct device *dev)
+{
+ struct nxp_fspi *f = dev_get_drvdata(dev);
+
+ nxp_fspi_default_setup(f);
+
+ return 0;
+}
+
+static const struct of_device_id nxp_fspi_dt_ids[] = {
+ { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
+
+static const struct dev_pm_ops nxp_fspi_pm_ops = {
+ .suspend = nxp_fspi_suspend,
+ .resume = nxp_fspi_resume,
+};
+
+static struct platform_driver nxp_fspi_driver = {
+ .driver = {
+ .name = "nxp-fspi",
+ .of_match_table = nxp_fspi_dt_ids,
+ .pm = &nxp_fspi_pm_ops,
+ },
+ .probe = nxp_fspi_probe,
+ .remove = nxp_fspi_remove,
+};
+module_platform_driver(nxp_fspi_driver);
+
+MODULE_DESCRIPTION("NXP FSPI Controller Driver");
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>");
+MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
+MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 2fd8881fcd65..8be304379628 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -623,8 +623,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
- cfg.src_maxburst = es;
- cfg.dst_maxburst = es;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 0c793e31d60f..26684178786f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -253,6 +253,7 @@
#define STATE_RUNNING ((void *) 1)
#define STATE_DONE ((void *) 2)
#define STATE_ERROR ((void *) -1)
+#define STATE_TIMEOUT ((void *) -2)
/*
* SSP State - Whether Enabled or Disabled
@@ -1484,6 +1485,30 @@ err_config_dma:
writew(irqflags, SSP_IMSC(pl022->virtbase));
}
+static void print_current_status(struct pl022 *pl022)
+{
+ u32 read_cr0;
+ u16 read_cr1, read_dmacr, read_sr;
+
+ if (pl022->vendor->extended_cr)
+ read_cr0 = readl(SSP_CR0(pl022->virtbase));
+ else
+ read_cr0 = readw(SSP_CR0(pl022->virtbase));
+ read_cr1 = readw(SSP_CR1(pl022->virtbase));
+ read_dmacr = readw(SSP_DMACR(pl022->virtbase));
+ read_sr = readw(SSP_SR(pl022->virtbase));
+
+ dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
+ dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
+ dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
+ dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
+ dev_warn(&pl022->adev->dev,
+ "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
+ pl022->exp_fifo_level,
+ pl022->vendor->fifodepth);
+
+}
+
static void do_polling_transfer(struct pl022 *pl022)
{
struct spi_message *message = NULL;
@@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022)
if (time_after(time, timeout)) {
dev_warn(&pl022->adev->dev,
"%s: timeout!\n", __func__);
- message->state = STATE_ERROR;
+ message->state = STATE_TIMEOUT;
+ print_current_status(pl022);
goto out;
}
cpu_relax();
@@ -1553,6 +1579,8 @@ out:
/* Handle end of message */
if (message->state == STATE_DONE)
message->status = 0;
+ else if (message->state == STATE_TIMEOUT)
+ message->status = -EAGAIN;
else
message->status = -EIO;
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 2fa7f4b43492..15592598273e 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -23,7 +23,7 @@
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
- struct spi_message *msg = drv_data->master->cur_msg;
+ struct spi_message *msg = drv_data->controller->cur_msg;
/*
* It is possible that one CPU is handling ROR interrupt and other
@@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
msg->status = -EIO;
}
- spi_finalize_current_transfer(drv_data->master);
+ spi_finalize_current_transfer(drv_data->controller);
}
}
@@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
struct spi_transfer *xfer)
{
struct chip_data *chip =
- spi_get_ctldata(drv_data->master->cur_msg->spi);
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
struct dma_chan *chan;
@@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
- chan = drv_data->master->dma_tx;
+ chan = drv_data->controller->dma_tx;
} else {
cfg.src_addr = drv_data->ssdr_physical;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
sgt = &xfer->rx_sg;
- chan = drv_data->master->dma_rx;
+ chan = drv_data->controller->dma_rx;
}
ret = dmaengine_slave_config(chan, &cfg);
@@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
- dmaengine_terminate_async(drv_data->master->dma_rx);
- dmaengine_terminate_async(drv_data->master->dma_tx);
+ dmaengine_terminate_async(drv_data->controller->dma_rx);
+ dmaengine_terminate_async(drv_data->controller->dma_tx);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
@@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
return 0;
err_rx:
- dmaengine_terminate_async(drv_data->master->dma_tx);
+ dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
return err;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
- dma_async_issue_pending(drv_data->master->dma_rx);
- dma_async_issue_pending(drv_data->master->dma_tx);
+ dma_async_issue_pending(drv_data->controller->dma_rx);
+ dma_async_issue_pending(drv_data->controller->dma_tx);
atomic_set(&drv_data->dma_running, 1);
}
@@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data)
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
atomic_set(&drv_data->dma_running, 0);
- dmaengine_terminate_sync(drv_data->master->dma_rx);
- dmaengine_terminate_sync(drv_data->master->dma_tx);
+ dmaengine_terminate_sync(drv_data->controller->dma_rx);
+ dmaengine_terminate_sync(drv_data->controller->dma_tx);
}
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
- struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
struct device *dev = &drv_data->pdev->dev;
- struct spi_controller *master = drv_data->master;
+ struct spi_controller *controller = drv_data->controller;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- master->dma_tx = dma_request_slave_channel_compat(mask,
+ controller->dma_tx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->tx_param, dev, "tx");
- if (!master->dma_tx)
+ if (!controller->dma_tx)
return -ENODEV;
- master->dma_rx = dma_request_slave_channel_compat(mask,
+ controller->dma_rx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->rx_param, dev, "rx");
- if (!master->dma_rx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (!controller->dma_rx) {
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
return -ENODEV;
}
@@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
- struct spi_controller *master = drv_data->master;
+ struct spi_controller *controller = drv_data->controller;
- if (master->dma_rx) {
- dmaengine_terminate_sync(master->dma_rx);
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (controller->dma_rx) {
+ dmaengine_terminate_sync(controller->dma_rx);
+ dma_release_channel(controller->dma_rx);
+ controller->dma_rx = NULL;
}
- if (master->dma_tx) {
- dmaengine_terminate_sync(master->dma_tx);
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (controller->dma_tx) {
+ dmaengine_terminate_sync(controller->dma_tx);
+ dma_release_channel(controller->dma_tx);
+ controller->dma_tx = NULL;
}
}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 869f188b02eb..1727fdfbac28 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
struct platform_device_info pi;
int ret;
struct platform_device *pdev;
- struct pxa2xx_spi_master spi_pdata;
+ struct pxa2xx_spi_controller spi_pdata;
struct ssp_device *ssp;
struct pxa_spi_info *c;
char buf[40];
@@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
- struct pxa2xx_spi_master *spi_pdata;
+ struct pxa2xx_spi_controller *spi_pdata;
spi_pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index d84b893a64d7..b6ddba833d02 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
- if (drv_data->master_info->enable_dma) {
+ if (drv_data->controller_info->enable_dma) {
__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
if (config->reg_general >= 0) {
@@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
__lpss_ssp_write_priv(drv_data,
config->reg_cs_ctrl, value);
ndelay(1000000000 /
- (drv_data->master->max_speed_hz / 2));
+ (drv_data->controller->max_speed_hz / 2));
}
}
@@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data)
static void reset_sccr1(struct driver_data *drv_data)
{
struct chip_data *chip =
- spi_get_ctldata(drv_data->master->cur_msg->spi);
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
@@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
dev_err(&drv_data->pdev->dev, "%s\n", msg);
- drv_data->master->cur_msg->status = -EIO;
- spi_finalize_current_transfer(drv_data->master);
+ drv_data->controller->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
@@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
- spi_finalize_current_transfer(drv_data->master);
+ spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
@@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
- if (!drv_data->master->cur_msg) {
+ if (!drv_data->controller->cur_msg) {
handle_bad_msg(drv_data);
/* Never fail */
return IRQ_HANDLED;
@@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
- unsigned long ssp_clk = drv_data->master->max_speed_hz;
+ unsigned long ssp_clk = drv_data->controller->max_speed_hz;
const struct ssp_device *ssp = drv_data->ssp;
rate = min_t(int, ssp_clk, rate);
@@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
int rate)
{
struct chip_data *chip =
- spi_get_ctldata(drv_data->master->cur_msg->spi);
+ spi_get_ctldata(drv_data->controller->cur_msg->spi);
unsigned int clk_div;
switch (drv_data->ssp_type) {
@@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
return clk_div << 8;
}
-static bool pxa2xx_spi_can_dma(struct spi_controller *master,
+static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master,
xfer->len >= chip->dma_burst_size;
}
-static int pxa2xx_spi_transfer_one(struct spi_controller *master,
+static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
- struct spi_message *message = master->cur_msg;
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
+ struct spi_message *message = controller->cur_msg;
struct chip_data *chip = spi_get_ctldata(message->spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
@@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
"DMA burst size reduced to match bits_per_word\n");
}
- dma_mapped = master->can_dma &&
- master->can_dma(master, message->spi, transfer) &&
- master->cur_msg_mapped;
+ dma_mapped = controller->can_dma &&
+ controller->can_dma(controller, message->spi, transfer) &&
+ controller->cur_msg_mapped;
if (dma_mapped) {
/* Ensure we have the correct interrupt handler */
@@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- master->max_speed_hz
+ controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- master->max_speed_hz / 2
+ controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
@@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
}
}
- if (spi_controller_is_slave(master)) {
+ if (spi_controller_is_slave(controller)) {
while (drv_data->write(drv_data))
;
if (drv_data->gpiod_ready) {
@@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
return 1;
}
-static int pxa2xx_spi_slave_abort(struct spi_master *master)
+static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Stop and reset SSP */
write_SSSR_CS(drv_data, drv_data->clear_sr);
@@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master)
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
- drv_data->master->cur_msg->status = -EINTR;
- spi_finalize_current_transfer(drv_data->master);
+ drv_data->controller->cur_msg->status = -EINTR;
+ spi_finalize_current_transfer(drv_data->controller);
return 0;
}
-static void pxa2xx_spi_handle_err(struct spi_controller *master,
+static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct spi_message *msg)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
pxa2xx_spi_write(drv_data, SSCR0,
@@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master,
pxa2xx_spi_dma_stop(drv_data);
}
-static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master)
+static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
pxa2xx_spi_write(drv_data, SSCR0,
@@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi)
break;
default:
tx_hi_thres = 0;
- if (spi_controller_is_slave(drv_data->master)) {
+ if (spi_controller_is_slave(drv_data->controller)) {
tx_thres = 1;
rx_thres = 2;
} else {
@@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi)
chip->frm = spi->chip_select;
}
- chip->enable_dma = drv_data->master_info->enable_dma;
+ chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
@@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi)
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
}
- if (spi_controller_is_slave(drv_data->master)) {
+ if (spi_controller_is_slave(drv_data->controller)) {
chip->cr1 |= SSCR1_SCFR;
chip->cr1 |= SSCR1_SCLKDIR;
chip->cr1 |= SSCR1_SFRMDIR;
@@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
#endif /* CONFIG_PCI */
-static struct pxa2xx_spi_master *
+static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
- struct pxa2xx_spi_master *pdata;
+ struct pxa2xx_spi_controller *pdata;
struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
@@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return pdata;
}
-static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
+static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
unsigned int cs)
{
- struct driver_data *drv_data = spi_controller_get_devdata(master);
+ struct driver_data *drv_data = spi_controller_get_devdata(controller);
if (has_acpi_companion(&drv_data->pdev->dev)) {
switch (drv_data->ssp_type) {
@@ -1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pxa2xx_spi_master *platform_info;
- struct spi_controller *master;
+ struct pxa2xx_spi_controller *platform_info;
+ struct spi_controller *controller;
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
@@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
if (platform_info->is_slave)
- master = spi_alloc_slave(dev, sizeof(struct driver_data));
+ controller = spi_alloc_slave(dev, sizeof(struct driver_data));
else
- master = spi_alloc_master(dev, sizeof(struct driver_data));
+ controller = spi_alloc_master(dev, sizeof(struct driver_data));
- if (!master) {
- dev_err(&pdev->dev, "cannot alloc spi_master\n");
+ if (!controller) {
+ dev_err(&pdev->dev, "cannot alloc spi_controller\n");
pxa_ssp_free(ssp);
return -ENOMEM;
}
- drv_data = spi_controller_get_devdata(master);
- drv_data->master = master;
- drv_data->master_info = platform_info;
+ drv_data = spi_controller_get_devdata(controller);
+ drv_data->controller = controller;
+ drv_data->controller_info = platform_info;
drv_data->pdev = pdev;
drv_data->ssp = ssp;
- master->dev.of_node = pdev->dev.of_node;
+ controller->dev.of_node = pdev->dev.of_node;
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
-
- master->bus_num = ssp->port_id;
- master->dma_alignment = DMA_ALIGNMENT;
- master->cleanup = cleanup;
- master->setup = setup;
- master->set_cs = pxa2xx_spi_set_cs;
- master->transfer_one = pxa2xx_spi_transfer_one;
- master->slave_abort = pxa2xx_spi_slave_abort;
- master->handle_err = pxa2xx_spi_handle_err;
- master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
- master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
- master->auto_runtime_pm = true;
- master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+
+ controller->bus_num = ssp->port_id;
+ controller->dma_alignment = DMA_ALIGNMENT;
+ controller->cleanup = cleanup;
+ controller->setup = setup;
+ controller->set_cs = pxa2xx_spi_set_cs;
+ controller->transfer_one = pxa2xx_spi_transfer_one;
+ controller->slave_abort = pxa2xx_spi_slave_abort;
+ controller->handle_err = pxa2xx_spi_handle_err;
+ controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
+ controller->auto_runtime_pm = true;
+ controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
drv_data->ssp_type = ssp->type;
@@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
break;
default:
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
break;
}
@@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->clear_sr = SSSR_ROR;
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
@@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data);
if (status < 0) {
dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
- goto out_error_master_alloc;
+ goto out_error_controller_alloc;
}
/* Setup DMA if requested */
@@ -1695,7 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
dev_dbg(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
- master->can_dma = pxa2xx_spi_can_dma;
+ controller->can_dma = pxa2xx_spi_can_dma;
+ controller->max_dma_len = MAX_DMA_LEN;
}
}
@@ -1704,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (status)
goto out_error_dma_irq_alloc;
- master->max_speed_hz = clk_get_rate(ssp->clk);
+ controller->max_speed_hz = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1727,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
break;
default:
- if (spi_controller_is_slave(master)) {
+ if (spi_controller_is_slave(controller)) {
tmp = SSCR1_SCFR |
SSCR1_SCLKDIR |
SSCR1_SFRMDIR |
@@ -1740,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_Motorola | SSCR0_DataSize(8);
- if (!spi_controller_is_slave(master))
+ if (!spi_controller_is_slave(controller))
tmp |= SSCR0_SCR(2);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@@ -1765,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
platform_info->num_chipselect = config->cs_num;
}
}
- master->num_chipselect = platform_info->num_chipselect;
+ controller->num_chipselect = platform_info->num_chipselect;
count = gpiod_count(&pdev->dev, "cs");
if (count > 0) {
int i;
- master->num_chipselect = max_t(int, count,
- master->num_chipselect);
+ controller->num_chipselect = max_t(int, count,
+ controller->num_chipselect);
drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
- master->num_chipselect, sizeof(struct gpio_desc *),
+ controller->num_chipselect, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!drv_data->cs_gpiods) {
status = -ENOMEM;
goto out_error_clock_enabled;
}
- for (i = 0; i < master->num_chipselect; i++) {
+ for (i = 0; i < controller->num_chipselect; i++) {
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
@@ -1815,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
- status = devm_spi_register_controller(&pdev->dev, master);
+ status = devm_spi_register_controller(&pdev->dev, controller);
if (status != 0) {
- dev_err(&pdev->dev, "problem registering spi master\n");
+ dev_err(&pdev->dev, "problem registering spi controller\n");
goto out_error_clock_enabled;
}
@@ -1832,8 +1833,8 @@ out_error_dma_irq_alloc:
pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
-out_error_master_alloc:
- spi_controller_put(master);
+out_error_controller_alloc:
+ spi_controller_put(controller);
pxa_ssp_free(ssp);
return status;
}
@@ -1854,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(ssp->clk);
/* Release DMA */
- if (drv_data->master_info->enable_dma)
+ if (drv_data->controller_info->enable_dma)
pxa2xx_spi_dma_release(drv_data);
pm_runtime_put_noidle(&pdev->dev);
@@ -1876,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status;
- status = spi_controller_suspend(drv_data->master);
+ status = spi_controller_suspend(drv_data->controller);
if (status != 0)
return status;
pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1901,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev)
}
/* Start the queue running */
- return spi_controller_resume(drv_data->master);
+ return spi_controller_resume(drv_data->controller);
}
#endif
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 4e324da66ef7..aba777b4502d 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -31,10 +31,10 @@ struct driver_data {
/* SPI framework hookup */
enum pxa_ssp_type ssp_type;
- struct spi_controller *master;
+ struct spi_controller *controller;
/* PXA hookup */
- struct pxa2xx_spi_master *master_info;
+ struct pxa2xx_spi_controller *controller_info;
/* SSP register addresses */
void __iomem *ioaddr;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a4ef641b5227..556870dcdf79 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -180,7 +180,7 @@
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
- struct spi_master *master;
+ struct spi_controller *ctlr;
wait_queue_head_t wait;
struct clk *clk;
u16 spcmd;
@@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi)
/* optional functions */
struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
- int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
- struct spi_transfer *xfer);
+ int (*transfer_one)(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer);
u16 mode_bits;
u16 flags;
u16 fifo_size;
@@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
int error = rspi_wait_for_tx_empty(rspi);
if (error < 0) {
- dev_err(&rspi->master->dev, "transmit timeout\n");
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return error;
}
rspi_write_data(rspi, data);
@@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi)
error = rspi_wait_for_rx_full(rspi);
if (error < 0) {
- dev_err(&rspi->master->dev, "receive timeout\n");
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
return error;
}
data = rspi_read_data(rspi);
@@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
- desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
- rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
+ rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EAGAIN;
@@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
}
if (tx) {
- desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
- tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
+ tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EAGAIN;
@@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* Now start DMA */
if (rx)
- dma_async_issue_pending(rspi->master->dma_rx);
+ dma_async_issue_pending(rspi->ctlr->dma_rx);
if (tx)
- dma_async_issue_pending(rspi->master->dma_tx);
+ dma_async_issue_pending(rspi->ctlr->dma_tx);
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
@@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = 0;
} else {
if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
+ dev_err(&rspi->ctlr->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
}
if (tx)
- dmaengine_terminate_all(rspi->master->dma_tx);
+ dmaengine_terminate_all(rspi->ctlr->dma_tx);
if (rx)
- dmaengine_terminate_all(rspi->master->dma_rx);
+ dmaengine_terminate_all(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
- dmaengine_terminate_all(rspi->master->dma_rx);
+ dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->master->dev),
- dev_name(&rspi->master->dev));
+ dev_driver_string(&rspi->ctlr->dev),
+ dev_name(&rspi->ctlr->dev));
}
return ret;
}
@@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi,
return xfer->len > rspi->ops->fifo_size;
}
-static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
return __rspi_can_dma(rspi, xfer);
}
@@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
- if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
+ if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
return -EAGAIN;
/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
@@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
return 0;
}
-static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
- struct spi_transfer *xfer)
+static int rspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
u8 spcr;
spcr = rspi_read8(rspi, RSPI_SPCR);
@@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
return rspi_common_transfer(rspi, xfer);
}
-static int rspi_rz_transfer_one(struct spi_master *master,
+static int rspi_rz_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
rspi_rz_receive_init(rspi);
@@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
if (n == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "transmit timeout\n");
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "receive timeout\n");
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
- if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
if (ret != -EAGAIN)
return ret;
@@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "transmit timeout\n");
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
- if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
@@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
- dev_err(&rspi->master->dev, "receive timeout\n");
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
return 0;
}
-static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
- struct spi_transfer *xfer)
+static int qspi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi, struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer);
@@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
static int rspi_setup(struct spi_device *spi)
{
- struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+ struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
rspi->max_speed_hz = spi->max_speed_hz;
@@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
return 0;
}
-static int rspi_prepare_message(struct spi_master *master,
+static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
int ret;
if (msg->spi->mode &
@@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master,
return 0;
}
-static int rspi_unprepare_message(struct spi_master *master,
+static int rspi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct rspi_data *rspi = spi_master_get_devdata(master);
+ struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
/* Disable SPI function */
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
@@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
return chan;
}
-static int rspi_request_dma(struct device *dev, struct spi_master *master,
+static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
@@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
return 0;
}
- master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
- res->start + RSPI_SPDR);
- if (!master->dma_tx)
+ ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
+ res->start + RSPI_SPDR);
+ if (!ctlr->dma_tx)
return -ENODEV;
- master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
- res->start + RSPI_SPDR);
- if (!master->dma_rx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
+ res->start + RSPI_SPDR);
+ if (!ctlr->dma_rx) {
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
return -ENODEV;
}
- master->can_dma = rspi_can_dma;
+ ctlr->can_dma = rspi_can_dma;
dev_info(dev, "DMA available");
return 0;
}
-static void rspi_release_dma(struct spi_master *master)
+static void rspi_release_dma(struct spi_controller *ctlr)
{
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
}
static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
- rspi_release_dma(rspi->master);
+ rspi_release_dma(rspi->ctlr);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
- .flags = SPI_MASTER_MUST_TX,
+ .flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
};
@@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
- .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
};
@@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
- .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
};
@@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = {
MODULE_DEVICE_TABLE(of, rspi_of_match);
-static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
u32 num_cs;
int error;
@@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master)
return error;
}
- master->num_chipselect = num_cs;
+ ctlr->num_chipselect = num_cs;
return 0;
}
#else
#define rspi_of_match NULL
-static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
return -EINVAL;
}
@@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq,
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct rspi_data *rspi;
int ret;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
- master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
- if (master == NULL)
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+ if (ctlr == NULL)
return -ENOMEM;
ops = of_device_get_match_data(&pdev->dev);
if (ops) {
- ret = rspi_parse_dt(&pdev->dev, master);
+ ret = rspi_parse_dt(&pdev->dev, ctlr);
if (ret)
goto error1;
} else {
ops = (struct spi_ops *)pdev->id_entry->driver_data;
rspi_pd = dev_get_platdata(&pdev->dev);
if (rspi_pd && rspi_pd->num_chipselect)
- master->num_chipselect = rspi_pd->num_chipselect;
+ ctlr->num_chipselect = rspi_pd->num_chipselect;
else
- master->num_chipselect = 2; /* default */
+ ctlr->num_chipselect = 2; /* default */
}
/* ops parameter check */
@@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
- rspi = spi_master_get_devdata(master);
+ rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
- rspi->master = master;
+ rspi->ctlr = ctlr;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rspi->addr = devm_ioremap_resource(&pdev->dev, res);
@@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev)
init_waitqueue_head(&rspi->wait);
- master->bus_num = pdev->id;
- master->setup = rspi_setup;
- master->auto_runtime_pm = true;
- master->transfer_one = ops->transfer_one;
- master->prepare_message = rspi_prepare_message;
- master->unprepare_message = rspi_unprepare_message;
- master->mode_bits = ops->mode_bits;
- master->flags = ops->flags;
- master->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->setup = rspi_setup;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one = ops->transfer_one;
+ ctlr->prepare_message = rspi_prepare_message;
+ ctlr->unprepare_message = rspi_unprepare_message;
+ ctlr->mode_bits = ops->mode_bits;
+ ctlr->flags = ops->flags;
+ ctlr->dev.of_node = pdev->dev.of_node;
ret = platform_get_irq_byname(pdev, "rx");
if (ret < 0) {
@@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev)
goto error2;
}
- ret = rspi_request_dma(&pdev->dev, master, res);
+ ret = rspi_request_dma(&pdev->dev, ctlr, res);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error3;
}
@@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev)
return 0;
error3:
- rspi_release_dma(master);
+ rspi_release_dma(ctlr);
error2:
pm_runtime_disable(&pdev->dev);
error1:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
@@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
- return spi_master_suspend(rspi->master);
+ return spi_controller_suspend(rspi->ctlr);
}
static int rspi_resume(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
- return spi_master_resume(rspi->master);
+ return spi_controller_resume(rspi->ctlr);
}
static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index dc0926e43665..7f73f91d412a 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -35,7 +35,7 @@
struct hspi_priv {
void __iomem *addr;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct device *dev;
struct clk *clk;
};
@@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
-static int hspi_transfer_one_message(struct spi_master *master,
+static int hspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct hspi_priv *hspi = spi_master_get_devdata(master);
+ struct hspi_priv *hspi = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
u32 tx;
u32 rx;
@@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
ndelay(nsecs);
hspi_hw_cs_disable(hspi);
}
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return ret;
}
@@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct hspi_priv *hspi;
struct clk *clk;
int ret;
@@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi));
+ if (!ctlr)
return -ENOMEM;
- }
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev)
goto error0;
}
- hspi = spi_master_get_devdata(master);
+ hspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, hspi);
/* init hspi */
- hspi->master = master;
+ hspi->ctlr = ctlr;
hspi->dev = &pdev->dev;
hspi->clk = clk;
hspi->addr = devm_ioremap(hspi->dev,
res->start, resource_size(res));
if (!hspi->addr) {
- dev_err(&pdev->dev, "ioremap error.\n");
ret = -ENOMEM;
goto error1;
}
pm_runtime_enable(&pdev->dev);
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->transfer_one_message = hspi_transfer_one_message;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->bus_num = pdev->id;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one_message = hspi_transfer_one_message;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error2;
}
@@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev)
error1:
clk_put(clk);
error0:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d14b407cc800..e2eb466db10a 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * SuperH MSIOF SPI Master Interface
+ * SuperH MSIOF SPI Controller Interface
*
* Copyright (c) 2009 Magnus Damm
* Copyright (C) 2014 Renesas Electronics Corporation
@@ -32,14 +32,15 @@
#include <asm/unaligned.h>
struct sh_msiof_chipdata {
+ u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
- u16 master_flags;
+ u16 ctlr_flags;
u16 min_div_pow;
};
struct sh_msiof_spi_priv {
- struct spi_master *master;
+ struct spi_controller *ctlr;
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
@@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
- if (!(p->master->flags & SPI_MASTER_MUST_TX))
+ if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, RSCR, scr);
}
@@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master)) {
+ if (spi_controller_is_slave(p->ctlr)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
} else {
sh_msiof_write(p, TMDR1,
tmp | MDR1_TRMD | TMDR1_PCON |
(ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
}
- if (p->master->flags & SPI_MASTER_MUST_TX) {
+ if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
@@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
{
u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
+ if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
@@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_setup(struct spi_device *spi)
{
- struct device_node *np = spi->master->dev.of_node;
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ struct device_node *np = spi->controller->dev.of_node;
+ struct sh_msiof_spi_priv *p =
+ spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
if (!np) {
@@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
}
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->ctlr))
return 0;
if (p->native_cs_inited &&
@@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
}
-static int sh_msiof_prepare_message(struct spi_master *master,
+static int sh_msiof_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
const struct spi_device *spi = msg->spi;
u32 ss, cs_high;
@@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master,
static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- bool slave = spi_controller_is_slave(p->master);
+ bool slave = spi_controller_is_slave(p->ctlr);
int ret = 0;
/* setup clock and rx/tx signals */
@@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- bool slave = spi_controller_is_slave(p->master);
+ bool slave = spi_controller_is_slave(p->ctlr);
int ret = 0;
/* shut down frame, rx/tx and clock signals */
@@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
return ret;
}
-static int sh_msiof_slave_abort(struct spi_master *master)
+static int sh_msiof_slave_abort(struct spi_controller *ctlr)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p->slave_aborted = true;
complete(&p->done);
@@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master)
static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
struct completion *x)
{
- if (spi_controller_is_slave(p->master)) {
+ if (spi_controller_is_slave(p->ctlr)) {
if (wait_for_completion_interruptible(x) ||
p->slave_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
@@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
ier_bits |= IER_RDREQE | IER_RDMAE;
- desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
+ desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
@@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (tx) {
ier_bits |= IER_TDREQE | IER_TDMAE;
- dma_sync_single_for_device(p->master->dma_tx->device->dev,
+ dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
- desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
+ desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
p->tx_dma_addr, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
@@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* Now start DMA */
if (rx)
- dma_async_issue_pending(p->master->dma_rx);
+ dma_async_issue_pending(p->ctlr->dma_rx);
if (tx)
- dma_async_issue_pending(p->master->dma_tx);
+ dma_async_issue_pending(p->ctlr->dma_tx);
ret = sh_msiof_spi_start(p, rx);
if (ret) {
@@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
if (rx)
- dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
- p->rx_dma_addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
+ p->rx_dma_addr, len, DMA_FROM_DEVICE);
return 0;
@@ -856,10 +857,10 @@ stop_reset:
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
- dmaengine_terminate_all(p->master->dma_tx);
+ dmaengine_terminate_all(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
- dmaengine_terminate_all(p->master->dma_rx);
+ dmaengine_terminate_all(p->ctlr->dma_rx);
sh_msiof_write(p, IER, 0);
return ret;
}
@@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
memcpy(dst, src, words * 4);
}
-static int sh_msiof_transfer_one(struct spi_master *master,
+static int sh_msiof_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
void (*copy32)(u32 *, const u32 *, unsigned int);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
@@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master,
int ret;
/* setup clocks (clock already enabled in chipselect()) */
- if (!spi_controller_is_slave(p->master))
+ if (!spi_controller_is_slave(p->ctlr))
sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
- while (master->dma_tx && len > 15) {
+ while (ctlr->dma_tx && len > 15) {
/*
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit
* words, with byte resp. word swapping.
@@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master,
unsigned int l = 0;
if (tx_buf)
- l = min(len, p->tx_fifo_size * 4);
+ l = min(round_down(len, 4), p->tx_fifo_size * 4);
if (rx_buf)
- l = min(len, p->rx_fifo_size * 4);
+ l = min(round_down(len, 4), p->rx_fifo_size * 4);
if (bits <= 8) {
- if (l & 3)
- break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 3)
- break;
copy32 = copy_wswap32;
} else {
copy32 = copy_plain32;
@@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master,
}
static const struct sh_msiof_chipdata sh_data = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
- .master_flags = 0,
+ .ctlr_flags = 0,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen2_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
- .master_flags = SPI_MASTER_MUST_TX,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen3_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
- .master_flags = SPI_MASTER_MUST_TX,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
};
@@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
if (ret <= 0)
return 0;
- num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
for (i = 0; i < num_cs; i++) {
struct gpio_desc *gpiod;
@@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
{
struct platform_device *pdev = p->pdev;
struct device *dev = &pdev->dev;
- const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
+ const struct sh_msiof_spi_info *info = p->info;
unsigned int dma_tx_id, dma_rx_id;
const struct resource *res;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct device *tx_dev, *rx_dev;
if (dev->of_node) {
@@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master = p->master;
- master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- dma_tx_id,
- res->start + TFDR);
- if (!master->dma_tx)
+ ctlr = p->ctlr;
+ ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
+ dma_tx_id, res->start + TFDR);
+ if (!ctlr->dma_tx)
return -ENODEV;
- master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- dma_rx_id,
- res->start + RFDR);
- if (!master->dma_rx)
+ ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
+ dma_rx_id, res->start + RFDR);
+ if (!ctlr->dma_rx)
goto free_tx_chan;
p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
@@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
if (!p->rx_dma_page)
goto free_tx_page;
- tx_dev = master->dma_tx->device->dev;
+ tx_dev = ctlr->dma_tx->device->dev;
p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(tx_dev, p->tx_dma_addr))
goto free_rx_page;
- rx_dev = master->dma_rx->device->dev;
+ rx_dev = ctlr->dma_rx->device->dev;
p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_dev, p->rx_dma_addr))
@@ -1272,34 +1272,34 @@ free_rx_page:
free_tx_page:
free_page((unsigned long)p->tx_dma_page);
free_rx_chan:
- dma_release_channel(master->dma_rx);
+ dma_release_channel(ctlr->dma_rx);
free_tx_chan:
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
return -ENODEV;
}
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
- struct spi_master *master = p->master;
+ struct spi_controller *ctlr = p->ctlr;
- if (!master->dma_tx)
+ if (!ctlr->dma_tx)
return;
- dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
- PAGE_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
free_page((unsigned long)p->rx_dma_page);
free_page((unsigned long)p->tx_dma_page);
- dma_release_channel(master->dma_rx);
- dma_release_channel(master->dma_tx);
+ dma_release_channel(ctlr->dma_rx);
+ dma_release_channel(ctlr->dma_tx);
}
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
- struct spi_master *master;
+ struct spi_controller *ctlr;
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
@@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
}
if (info->mode == MSIOF_SPI_SLAVE)
- master = spi_alloc_slave(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_slave(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
else
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
- if (master == NULL)
+ ctlr = spi_alloc_master(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
+ if (ctlr == NULL)
return -ENOMEM;
- p = spi_master_get_devdata(master);
+ p = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, p);
- p->master = master;
+ p->ctlr = ctlr;
p->info = info;
p->min_div_pow = chipdata->min_div_pow;
@@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p->rx_fifo_size = p->info->rx_fifo_override;
/* Setup GPIO chip selects */
- master->num_chipselect = p->info->num_chipselect;
+ ctlr->num_chipselect = p->info->num_chipselect;
ret = sh_msiof_get_cs_gpios(p);
if (ret)
goto err1;
- /* init master code */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = chipdata->master_flags;
- master->bus_num = pdev->id;
- master->dev.of_node = pdev->dev.of_node;
- master->setup = sh_msiof_spi_setup;
- master->prepare_message = sh_msiof_prepare_message;
- master->slave_abort = sh_msiof_slave_abort;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- master->auto_runtime_pm = true;
- master->transfer_one = sh_msiof_transfer_one;
+ /* init controller code */
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ ctlr->flags = chipdata->ctlr_flags;
+ ctlr->bus_num = pdev->id;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->setup = sh_msiof_spi_setup;
+ ctlr->prepare_message = sh_msiof_prepare_message;
+ ctlr->slave_abort = sh_msiof_slave_abort;
+ ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
+ ctlr->auto_runtime_pm = true;
+ ctlr->transfer_one = sh_msiof_transfer_one;
ret = sh_msiof_request_dma(p);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto err2;
}
@@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
sh_msiof_release_dma(p);
pm_runtime_disable(&pdev->dev);
err1:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
@@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
- return spi_master_suspend(p->master);
+ return spi_controller_suspend(p->ctlr);
}
static int sh_msiof_spi_resume(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
- return spi_master_resume(p->master);
+ return spi_controller_resume(p->ctlr);
}
static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
@@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = {
};
module_platform_driver(sh_msiof_spi_drv);
-MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
+MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
new file mode 100644
index 000000000000..93ec2c6cdbfd
--- /dev/null
+++ b/drivers/spi/spi-sifive.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2018 SiFive, Inc.
+//
+// SiFive SPI controller driver (master mode only)
+//
+// Author: SiFive, Inc.
+// sifive@sifive.com
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+
+#define SIFIVE_SPI_DRIVER_NAME "sifive_spi"
+
+#define SIFIVE_SPI_MAX_CS 32
+#define SIFIVE_SPI_DEFAULT_DEPTH 8
+#define SIFIVE_SPI_DEFAULT_MAX_BITS 8
+
+/* register offsets */
+#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
+#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
+#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
+#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
+#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
+#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
+#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
+#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
+#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
+#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
+#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
+#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
+#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
+#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
+#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
+#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */
+
+/* sckdiv bits */
+#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
+
+/* sckmode bits */
+#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
+#define SIFIVE_SPI_SCKMODE_POL BIT(1)
+#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
+ SIFIVE_SPI_SCKMODE_POL)
+
+/* csmode bits */
+#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
+#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
+#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
+
+/* delay0 bits */
+#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
+#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
+
+/* delay1 bits */
+#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
+#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
+
+/* fmt bits */
+#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
+#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
+#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
+#define SIFIVE_SPI_FMT_PROTO_MASK 3U
+#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
+#define SIFIVE_SPI_FMT_DIR BIT(3)
+#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
+
+/* txdata bits */
+#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_TXDATA_FULL BIT(31)
+
+/* rxdata bits */
+#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
+
+/* ie and ip bits */
+#define SIFIVE_SPI_IP_TXWM BIT(0)
+#define SIFIVE_SPI_IP_RXWM BIT(1)
+
+struct sifive_spi {
+ void __iomem *regs; /* virt. address of control registers */
+ struct clk *clk; /* bus clock */
+ unsigned int fifo_depth; /* fifo depth in words */
+ u32 cs_inactive; /* level of the CS pins when inactive */
+ struct completion done; /* wake-up from interrupt */
+};
+
+static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value)
+{
+ iowrite32(value, spi->regs + offset);
+}
+
+static u32 sifive_spi_read(struct sifive_spi *spi, int offset)
+{
+ return ioread32(spi->regs + offset);
+}
+
+static void sifive_spi_init(struct sifive_spi *spi)
+{
+ /* Watermark interrupts are disabled by default */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ /* Default watermark FIFO threshold values */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0);
+
+ /* Set CS/SCK Delays and Inactive Time to defaults */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0,
+ SIFIVE_SPI_DELAY0_CSSCK(1) |
+ SIFIVE_SPI_DELAY0_SCKCS(1));
+ sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1,
+ SIFIVE_SPI_DELAY1_INTERCS(1) |
+ SIFIVE_SPI_DELAY1_INTERXFR(0));
+
+ /* Exit specialized memory-mapped SPI flash mode */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0);
+}
+
+static int
+sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct spi_device *device = msg->spi;
+
+ /* Update the chip select polarity */
+ if (device->mode & SPI_CS_HIGH)
+ spi->cs_inactive &= ~BIT(device->chip_select);
+ else
+ spi->cs_inactive |= BIT(device->chip_select);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
+
+ /* Select the correct device */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select);
+
+ /* Set clock mode */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE,
+ device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK);
+
+ return 0;
+}
+
+static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(device->master);
+
+ /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
+ if (device->mode & SPI_CS_HIGH)
+ is_high = !is_high;
+
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ?
+ SIFIVE_SPI_CSMODE_MODE_AUTO :
+ SIFIVE_SPI_CSMODE_MODE_HOLD);
+}
+
+static int
+sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ u32 cr;
+ unsigned int mode;
+
+ /* Calculate and program the clock rate */
+ cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
+ cr &= SIFIVE_SPI_SCKDIV_DIV_MASK;
+ sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr);
+
+ mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
+
+ /* Set frame format */
+ cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
+ switch (mode) {
+ case SPI_NBITS_QUAD:
+ cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
+ break;
+ default:
+ cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
+ break;
+ }
+ if (device->mode & SPI_LSB_FIRST)
+ cr |= SIFIVE_SPI_FMT_ENDIAN;
+ if (!t->rx_buf)
+ cr |= SIFIVE_SPI_FMT_DIR;
+ sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr);
+
+ /*
+ * We will want to poll if the time we need to wait is
+ * less than the context switching time.
+ * Let's call that threshold 5us. The operation will take:
+ * (8/mode) * fifo_depth / hz <= 5 * 10^-6
+ * 1600000 * fifo_depth <= hz * mode
+ */
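+ /*
+ * For example, with the default fifo_depth of 8 and a single data
+ * line (mode == 1), 1600000 * 8 = 12800000, so transfers clocked at
+ * 12.8 MHz or above are polled while slower ones sleep on the
+ * watermark-interrupt completion.
+ */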
+ return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
+}
+
+static irqreturn_t sifive_spi_irq(int irq, void *dev_id)
+{
+ struct sifive_spi *spi = dev_id;
+ u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
+
+ if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) {
+ /* Disable interrupts until next transfer */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+ complete(&spi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll)
+{
+ if (poll) {
+ u32 cr;
+
+ do {
+ cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
+ } while (!(cr & bit));
+ } else {
+ reinit_completion(&spi->done);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit);
+ wait_for_completion(&spi->done);
+ }
+}
+
+static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
+{
+ WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA)
+ & SIFIVE_SPI_TXDATA_FULL) != 0);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA,
+ *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK);
+}
+
+static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
+{
+ u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA);
+
+ WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0);
+ *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
+}
+
+static int
+sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+ int poll = sifive_spi_prep_transfer(spi, device, t);
+ const u8 *tx_ptr = t->tx_buf;
+ u8 *rx_ptr = t->rx_buf;
+ unsigned int remaining_words = t->len;
+
+ while (remaining_words) {
+ unsigned int n_words = min(remaining_words, spi->fifo_depth);
+ unsigned int i;
+
+ /* Enqueue n_words for transmission */
+ for (i = 0; i < n_words; i++)
+ sifive_spi_tx(spi, tx_ptr++);
+
+ if (rx_ptr) {
+ /* Wait for transmission + reception to complete */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK,
+ n_words - 1);
+ sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll);
+
+ /* Read out all the data from the RX FIFO */
+ for (i = 0; i < n_words; i++)
+ sifive_spi_rx(spi, rx_ptr++);
+ } else {
+ /* Wait for transmission to complete */
+ sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll);
+ }
+
+ remaining_words -= n_words;
+ }
+
+ return 0;
+}
+
+static int sifive_spi_probe(struct platform_device *pdev)
+{
+ struct sifive_spi *spi;
+ struct resource *res;
+ int ret, irq, num_cs;
+ u32 cs_bits, max_bits_per_word;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ spi = spi_master_get_devdata(master);
+ init_completion(&spi->done);
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi->regs)) {
+ ret = PTR_ERR(spi->regs);
+ goto put_master;
+ }
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ dev_err(&pdev->dev, "Unable to find bus clock\n");
+ ret = PTR_ERR(spi->clk);
+ goto put_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Unable to find interrupt\n");
+ ret = irq;
+ goto put_master;
+ }
+
+ /* Optional parameters */
+ ret = of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth",
+ &spi->fifo_depth);
+ if (ret < 0)
+ spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word",
+ &max_bits_per_word);
+
+ if (!ret && max_bits_per_word < 8) {
+ dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ /* Spin up the bus clock before hitting registers */
+ ret = clk_prepare_enable(spi->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable bus clock\n");
+ goto put_master;
+ }
+
+ /* probe the number of CS lines */
+ spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU);
+ cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
+ sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
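+ /*
+ * Bits with no chip-select output behind them read back as zero, so
+ * e.g. a part wired with four CS lines returns 0xf here and
+ * ilog2(0xf) + 1 gives num_cs = 4 below.
+ */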
+ if (!cs_bits) {
+ dev_err(&pdev->dev, "Could not auto probe CS lines\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ num_cs = ilog2(cs_bits) + 1;
+ if (num_cs > SIFIVE_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ ret = -EINVAL;
+ goto put_master;
+ }
+
+ /* Define our master */
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPHA | SPI_CPOL
+ | SPI_CS_HIGH | SPI_LSB_FIRST
+ | SPI_TX_DUAL | SPI_TX_QUAD
+ | SPI_RX_DUAL | SPI_RX_QUAD;
+ /* TODO: add driver support for bits_per_word < 8
+ * we need to "left-align" the bits (unless SPI_LSB_FIRST)
+ */
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS;
+ master->prepare_message = sifive_spi_prepare_message;
+ master->set_cs = sifive_spi_set_cs;
+ master->transfer_one = sifive_spi_transfer_one;
+
+ pdev->dev.dma_mask = NULL;
+ /* Configure the SPI master hardware */
+ sifive_spi_init(spi);
+
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0,
+ dev_name(&pdev->dev), spi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to bind to interrupt\n");
+ goto put_master;
+ }
+
+ dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
+ irq, master->num_chipselect);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto put_master;
+ }
+
+ return 0;
+
+put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int sifive_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct sifive_spi *spi = spi_master_get_devdata(master);
+
+ /* Disable all the interrupts just in case */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
+
+ return 0;
+}
+
+static const struct of_device_id sifive_spi_of_match[] = {
+ { .compatible = "sifive,spi0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sifive_spi_of_match);
+
+static struct platform_driver sifive_spi_driver = {
+ .probe = sifive_spi_probe,
+ .remove = sifive_spi_remove,
+ .driver = {
+ .name = SIFIVE_SPI_DRIVER_NAME,
+ .of_match_table = sifive_spi_of_match,
+ },
+};
+module_platform_driver(sifive_spi_driver);
+
+MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>");
+MODULE_DESCRIPTION("SiFive SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 8daa24eec624..1b7eebb72c07 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -2,6 +2,9 @@
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -9,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -128,11 +132,28 @@
#define SPRD_SPI_DEFAULT_SOURCE 26000000
#define SPRD_SPI_MAX_SPEED_HZ 48000000
#define SPRD_SPI_AUTOSUSPEND_DELAY 100
+#define SPRD_SPI_DMA_STEP 8
+
+enum sprd_spi_dma_channel {
+ SPRD_SPI_RX,
+ SPRD_SPI_TX,
+ SPRD_SPI_MAX,
+};
+
+struct sprd_spi_dma {
+ bool enable;
+ struct dma_chan *dma_chan[SPRD_SPI_MAX];
+ enum dma_slave_buswidth width;
+ u32 fragmens_len;
+ u32 rx_len;
+};
struct sprd_spi {
void __iomem *base;
+ phys_addr_t phy_base;
struct device *dev;
struct clk *clk;
+ int irq;
u32 src_clk;
u32 hw_mode;
u32 trans_len;
@@ -141,6 +162,8 @@ struct sprd_spi {
u32 hw_speed_hz;
u32 len;
int status;
+ struct sprd_spi_dma dma;
+ struct completion xfer_completion;
const void *tx_buf;
void *rx_buf;
int (*read_bufs)(struct sprd_spi *ss, u32 len);
@@ -380,7 +403,7 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len, len;
- int ret, write_size = 0;
+ int ret, write_size = 0, read_size = 0;
while (trans_len) {
len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
@@ -416,19 +439,223 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
goto complete;
if (ss->trans_mode & SPRD_SPI_RX_MODE)
- ss->read_bufs(ss, len);
+ read_size += ss->read_bufs(ss, len);
trans_len -= len;
}
- ret = write_size;
-
+ if (ss->trans_mode & SPRD_SPI_TX_MODE)
+ ret = write_size;
+ else
+ ret = read_size;
complete:
sprd_spi_enter_idle(ss);
return ret;
}
+static void sprd_spi_irq_enable(struct sprd_spi *ss)
+{
+ u32 val;
+
+ /* Clear interrupt status before enabling interrupt. */
+ writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
+ ss->base + SPRD_SPI_INT_CLR);
+ /* Enable SPI interrupt only in DMA mode. */
+ val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
+ writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
+ SPRD_SPI_RX_END_INT_EN,
+ ss->base + SPRD_SPI_INT_EN);
+}
+
+static void sprd_spi_irq_disable(struct sprd_spi *ss)
+{
+ writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
+}
+
+static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
+
+ if (enable)
+ val |= SPRD_SPI_DMA_EN;
+ else
+ val &= ~SPRD_SPI_DMA_EN;
+
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
+}
+
+static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
+ struct dma_slave_config *c,
+ struct sg_table *sg,
+ enum dma_transfer_direction dir)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int ret;
+
+ ret = dmaengine_slave_config(dma_chan, c);
+ if (ret < 0)
+ return ret;
+
+ flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
+ desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
+ if (!desc)
+ return -ENODEV;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ return dma_submit_error(cookie);
+
+ dma_async_issue_pending(dma_chan);
+
+ return 0;
+}
+
+static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
+ struct dma_slave_config config = {
+ .src_addr = ss->phy_base,
+ .src_addr_width = ss->dma.width,
+ .dst_addr_width = ss->dma.width,
+ .dst_maxburst = ss->dma.fragmens_len,
+ };
+ int ret;
+
+ ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
+ if (ret)
+ return ret;
+
+ return ss->dma.rx_len;
+}
+
+static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
+ struct dma_slave_config config = {
+ .dst_addr = ss->phy_base,
+ .src_addr_width = ss->dma.width,
+ .dst_addr_width = ss->dma.width,
+ .src_maxburst = ss->dma.fragmens_len,
+ };
+ int ret;
+
+ ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
+ if (ret)
+ return ret;
+
+ return t->len;
+}
+
+static int sprd_spi_dma_request(struct sprd_spi *ss)
+{
+ ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) {
+ if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER)
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
+
+ dev_err(ss->dev, "request RX DMA channel failed!\n");
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
+ }
+
+ ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
+ if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+
+ dev_err(ss->dev, "request TX DMA channel failed!\n");
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+ return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+ }
+
+ return 0;
+}
+
+static void sprd_spi_dma_release(struct sprd_spi *ss)
+{
+ if (ss->dma.dma_chan[SPRD_SPI_RX])
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+
+ if (ss->dma.dma_chan[SPRD_SPI_TX])
+ dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
+}
+
+static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
+ u32 trans_len = ss->trans_len;
+ int ret, write_size = 0;
+
+ reinit_completion(&ss->xfer_completion);
+ sprd_spi_irq_enable(ss);
+ if (ss->trans_mode & SPRD_SPI_TX_MODE) {
+ write_size = sprd_spi_dma_tx_config(ss, t);
+ sprd_spi_set_tx_length(ss, trans_len);
+
+ /*
+ * For 3-wire mode or dual TX line mode, we need to request
+ * the controller to start the transfer.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_tx_req(ss);
+ } else {
+ sprd_spi_set_rx_length(ss, trans_len);
+
+ /*
+ * For 3-wire mode or dual TX line mode, we need to request
+ * the controller to start reading.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_rx_req(ss);
+ else
+ write_size = ss->write_bufs(ss, trans_len);
+ }
+
+ if (write_size < 0) {
+ ret = write_size;
+ dev_err(ss->dev, "failed to write, ret = %d\n", ret);
+ goto trans_complete;
+ }
+
+ if (ss->trans_mode & SPRD_SPI_RX_MODE) {
+ /*
+ * Set up the DMA receive data length, which must be an
+ * integral multiple of fragment length. But when the length
+ * of received data is less than fragment length, DMA can be
+ * configured to receive data according to the actual length
+ * of received data.
+ */
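+ /*
+ * For example, with 8-bit words the fragment length is 8 bytes, so
+ * a 30-byte transfer programs rx_len = 24 and the remaining 6 bytes
+ * are read out by PIO from the RX-end interrupt handler.
+ */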
+ ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
+ (t->len - t->len % ss->dma.fragmens_len) :
+ t->len;
+ ret = sprd_spi_dma_rx_config(ss, t);
+ if (ret < 0) {
+ dev_err(&sdev->dev,
+ "failed to configure rx DMA, ret = %d\n", ret);
+ goto trans_complete;
+ }
+ }
+
+ sprd_spi_dma_enable(ss, true);
+ wait_for_completion(&ss->xfer_completion);
+
+ if (ss->trans_mode & SPRD_SPI_TX_MODE)
+ ret = write_size;
+ else
+ ret = ss->dma.rx_len;
+
+trans_complete:
+ sprd_spi_dma_enable(ss, false);
+ sprd_spi_enter_idle(ss);
+ sprd_spi_irq_disable(ss);
+
+ return ret;
+}
+
static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
{
/*
@@ -514,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev,
ss->trans_len = t->len;
ss->read_bufs = sprd_spi_read_bufs_u8;
ss->write_bufs = sprd_spi_write_bufs_u8;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
break;
case 16:
ss->trans_len = t->len >> 1;
ss->read_bufs = sprd_spi_read_bufs_u16;
ss->write_bufs = sprd_spi_write_bufs_u16;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
break;
case 32:
ss->trans_len = t->len >> 2;
ss->read_bufs = sprd_spi_read_bufs_u32;
ss->write_bufs = sprd_spi_write_bufs_u32;
+ ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
break;
default:
return -EINVAL;
@@ -561,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr,
if (ret)
goto setup_err;
- ret = sprd_spi_txrx_bufs(sdev, t);
+ if (sctlr->can_dma(sctlr, sdev, t))
+ ret = sprd_spi_dma_txrx_bufs(sdev, t);
+ else
+ ret = sprd_spi_txrx_bufs(sdev, t);
+
if (ret == t->len)
ret = 0;
else if (ret >= 0)
@@ -573,6 +810,53 @@ setup_err:
return ret;
}
+static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
+{
+ struct sprd_spi *ss = (struct sprd_spi *)data;
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
+
+ if (val & SPRD_SPI_MASK_TX_END) {
+ writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
+ if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
+ complete(&ss->xfer_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (val & SPRD_SPI_MASK_RX_END) {
+ writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
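+ /*
+ * DMA only moved whole fragments; any remainder is drained
+ * from the FIFO by PIO before signalling completion.
+ */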
+ if (ss->dma.rx_len < ss->len) {
+ ss->rx_buf += ss->dma.rx_len;
+ ss->dma.rx_len +=
+ ss->read_bufs(ss, ss->len - ss->dma.rx_len);
+ }
+ complete(&ss->xfer_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ int ret;
+
+ ss->irq = platform_get_irq(pdev, 0);
+ if (ss->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return ss->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
+ 0, pdev->name, ss);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
+ ss->irq, ret);
+
+ return ret;
+}
+
static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
{
struct clk *clk_spi, *clk_parent;
@@ -603,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
return 0;
}
+static bool sprd_spi_can_dma(struct spi_controller *sctlr,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+
+ return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
+}
+
+static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ int ret;
+
+ ret = sprd_spi_dma_request(ss);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ dev_warn(&pdev->dev,
+ "failed to request dma, enter no dma mode, ret = %d\n",
+ ret);
+
+ return 0;
+ }
+
+ ss->dma.enable = true;
+
+ return 0;
+}
+
static int sprd_spi_probe(struct platform_device *pdev)
{
struct spi_controller *sctlr;
@@ -623,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev)
goto free_controller;
}
+ ss->phy_base = res->start;
ss->dev = &pdev->dev;
sctlr->dev.of_node = pdev->dev.of_node;
sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
sctlr->bus_num = pdev->id;
sctlr->set_cs = sprd_spi_chipselect;
sctlr->transfer_one = sprd_spi_transfer_one;
+ sctlr->can_dma = sprd_spi_can_dma;
sctlr->auto_runtime_pm = true;
sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
SPRD_SPI_MAX_SPEED_HZ);
+ init_completion(&ss->xfer_completion);
platform_set_drvdata(pdev, sctlr);
ret = sprd_spi_clk_init(pdev, ss);
if (ret)
goto free_controller;
- ret = clk_prepare_enable(ss->clk);
+ ret = sprd_spi_irq_init(pdev, ss);
if (ret)
goto free_controller;
+ ret = sprd_spi_dma_init(pdev, ss);
+ if (ret)
+ goto free_controller;
+
+ ret = clk_prepare_enable(ss->clk);
+ if (ret)
+ goto release_dma;
+
ret = pm_runtime_set_active(&pdev->dev);
if (ret < 0)
goto disable_clk;
@@ -670,6 +994,8 @@ err_rpm_put:
pm_runtime_disable(&pdev->dev);
disable_clk:
clk_disable_unprepare(ss->clk);
+release_dma:
+ sprd_spi_dma_release(ss);
free_controller:
spi_controller_put(sctlr);
@@ -688,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev)
return ret;
}
+ spi_controller_suspend(sctlr);
+
+ if (ss->dma.enable)
+ sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -700,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ if (ss->dma.enable)
+ sprd_spi_dma_release(ss);
+
clk_disable_unprepare(ss->clk);
return 0;
@@ -715,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
if (ret)
return ret;
- return 0;
+ if (!ss->dma.enable)
+ return 0;
+
+ ret = sprd_spi_dma_request(ss);
+ if (ret)
+ clk_disable_unprepare(ss->clk);
+
+ return ret;
}
static const struct dev_pm_ops sprd_spi_pm_ops = {
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index ad1e55d3d5d5..4186ed20d796 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1,23 +1,10 @@
-/*
- * STMicroelectronics STM32 SPI Controller driver (master mode only)
- *
- * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
- * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
- *
- * License terms: GPL V2.0.
- *
- * spi_stm32 driver is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * spi_stm32 driver is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License along with
- * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// STMicroelectronics STM32 SPI Controller driver (master mode only)
+//
+// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
+
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -33,99 +20,251 @@
#define DRIVER_NAME "spi_stm32"
-/* STM32 SPI registers */
-#define STM32_SPI_CR1 0x00
-#define STM32_SPI_CR2 0x04
-#define STM32_SPI_CFG1 0x08
-#define STM32_SPI_CFG2 0x0C
-#define STM32_SPI_IER 0x10
-#define STM32_SPI_SR 0x14
-#define STM32_SPI_IFCR 0x18
-#define STM32_SPI_TXDR 0x20
-#define STM32_SPI_RXDR 0x30
-#define STM32_SPI_I2SCFGR 0x50
-
-/* STM32_SPI_CR1 bit fields */
-#define SPI_CR1_SPE BIT(0)
-#define SPI_CR1_MASRX BIT(8)
-#define SPI_CR1_CSTART BIT(9)
-#define SPI_CR1_CSUSP BIT(10)
-#define SPI_CR1_HDDIR BIT(11)
-#define SPI_CR1_SSI BIT(12)
-
-/* STM32_SPI_CR2 bit fields */
-#define SPI_CR2_TSIZE_SHIFT 0
-#define SPI_CR2_TSIZE GENMASK(15, 0)
-
-/* STM32_SPI_CFG1 bit fields */
-#define SPI_CFG1_DSIZE_SHIFT 0
-#define SPI_CFG1_DSIZE GENMASK(4, 0)
-#define SPI_CFG1_FTHLV_SHIFT 5
-#define SPI_CFG1_FTHLV GENMASK(8, 5)
-#define SPI_CFG1_RXDMAEN BIT(14)
-#define SPI_CFG1_TXDMAEN BIT(15)
-#define SPI_CFG1_MBR_SHIFT 28
-#define SPI_CFG1_MBR GENMASK(30, 28)
-#define SPI_CFG1_MBR_MIN 0
-#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
-
-/* STM32_SPI_CFG2 bit fields */
-#define SPI_CFG2_MIDI_SHIFT 4
-#define SPI_CFG2_MIDI GENMASK(7, 4)
-#define SPI_CFG2_COMM_SHIFT 17
-#define SPI_CFG2_COMM GENMASK(18, 17)
-#define SPI_CFG2_SP_SHIFT 19
-#define SPI_CFG2_SP GENMASK(21, 19)
-#define SPI_CFG2_MASTER BIT(22)
-#define SPI_CFG2_LSBFRST BIT(23)
-#define SPI_CFG2_CPHA BIT(24)
-#define SPI_CFG2_CPOL BIT(25)
-#define SPI_CFG2_SSM BIT(26)
-#define SPI_CFG2_AFCNTR BIT(31)
-
-/* STM32_SPI_IER bit fields */
-#define SPI_IER_RXPIE BIT(0)
-#define SPI_IER_TXPIE BIT(1)
-#define SPI_IER_DXPIE BIT(2)
-#define SPI_IER_EOTIE BIT(3)
-#define SPI_IER_TXTFIE BIT(4)
-#define SPI_IER_OVRIE BIT(6)
-#define SPI_IER_MODFIE BIT(9)
-#define SPI_IER_ALL GENMASK(10, 0)
-
-/* STM32_SPI_SR bit fields */
-#define SPI_SR_RXP BIT(0)
-#define SPI_SR_TXP BIT(1)
-#define SPI_SR_EOT BIT(3)
-#define SPI_SR_OVR BIT(6)
-#define SPI_SR_MODF BIT(9)
-#define SPI_SR_SUSP BIT(11)
-#define SPI_SR_RXPLVL_SHIFT 13
-#define SPI_SR_RXPLVL GENMASK(14, 13)
-#define SPI_SR_RXWNE BIT(15)
-
-/* STM32_SPI_IFCR bit fields */
-#define SPI_IFCR_ALL GENMASK(11, 3)
-
-/* STM32_SPI_I2SCFGR bit fields */
-#define SPI_I2SCFGR_I2SMOD BIT(0)
-
-/* SPI Master Baud Rate min/max divisor */
-#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN)
-#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX)
-
-/* SPI Communication mode */
+/* STM32F4 SPI registers */
+#define STM32F4_SPI_CR1 0x00
+#define STM32F4_SPI_CR2 0x04
+#define STM32F4_SPI_SR 0x08
+#define STM32F4_SPI_DR 0x0C
+#define STM32F4_SPI_I2SCFGR 0x1C
+
+/* STM32F4_SPI_CR1 bit fields */
+#define STM32F4_SPI_CR1_CPHA BIT(0)
+#define STM32F4_SPI_CR1_CPOL BIT(1)
+#define STM32F4_SPI_CR1_MSTR BIT(2)
+#define STM32F4_SPI_CR1_BR_SHIFT 3
+#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
+#define STM32F4_SPI_CR1_SPE BIT(6)
+#define STM32F4_SPI_CR1_LSBFRST BIT(7)
+#define STM32F4_SPI_CR1_SSI BIT(8)
+#define STM32F4_SPI_CR1_SSM BIT(9)
+#define STM32F4_SPI_CR1_RXONLY BIT(10)
+#define STM32F4_SPI_CR1_DFF BIT(11)
+#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
+#define STM32F4_SPI_CR1_CRCEN BIT(13)
+#define STM32F4_SPI_CR1_BIDIOE BIT(14)
+#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
+#define STM32F4_SPI_CR1_BR_MIN 0
+#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
+
+/* STM32F4_SPI_CR2 bit fields */
+#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
+#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
+#define STM32F4_SPI_CR2_SSOE BIT(2)
+#define STM32F4_SPI_CR2_FRF BIT(4)
+#define STM32F4_SPI_CR2_ERRIE BIT(5)
+#define STM32F4_SPI_CR2_RXNEIE BIT(6)
+#define STM32F4_SPI_CR2_TXEIE BIT(7)
+
+/* STM32F4_SPI_SR bit fields */
+#define STM32F4_SPI_SR_RXNE BIT(0)
+#define STM32F4_SPI_SR_TXE BIT(1)
+#define STM32F4_SPI_SR_CHSIDE BIT(2)
+#define STM32F4_SPI_SR_UDR BIT(3)
+#define STM32F4_SPI_SR_CRCERR BIT(4)
+#define STM32F4_SPI_SR_MODF BIT(5)
+#define STM32F4_SPI_SR_OVR BIT(6)
+#define STM32F4_SPI_SR_BSY BIT(7)
+#define STM32F4_SPI_SR_FRE BIT(8)
+
+/* STM32F4_SPI_I2SCFGR bit fields */
+#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
+
+/* STM32F4 SPI Baud Rate min/max divisor */
+#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
+#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
+
+/* STM32H7 SPI registers */
+#define STM32H7_SPI_CR1 0x00
+#define STM32H7_SPI_CR2 0x04
+#define STM32H7_SPI_CFG1 0x08
+#define STM32H7_SPI_CFG2 0x0C
+#define STM32H7_SPI_IER 0x10
+#define STM32H7_SPI_SR 0x14
+#define STM32H7_SPI_IFCR 0x18
+#define STM32H7_SPI_TXDR 0x20
+#define STM32H7_SPI_RXDR 0x30
+#define STM32H7_SPI_I2SCFGR 0x50
+
+/* STM32H7_SPI_CR1 bit fields */
+#define STM32H7_SPI_CR1_SPE BIT(0)
+#define STM32H7_SPI_CR1_MASRX BIT(8)
+#define STM32H7_SPI_CR1_CSTART BIT(9)
+#define STM32H7_SPI_CR1_CSUSP BIT(10)
+#define STM32H7_SPI_CR1_HDDIR BIT(11)
+#define STM32H7_SPI_CR1_SSI BIT(12)
+
+/* STM32H7_SPI_CR2 bit fields */
+#define STM32H7_SPI_CR2_TSIZE_SHIFT 0
+#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
+
+/* STM32H7_SPI_CFG1 bit fields */
+#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0
+#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
+#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5
+#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
+#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
+#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
+#define STM32H7_SPI_CFG1_MBR_SHIFT 28
+#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
+#define STM32H7_SPI_CFG1_MBR_MIN 0
+#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
+
+/* STM32H7_SPI_CFG2 bit fields */
+#define STM32H7_SPI_CFG2_MIDI_SHIFT 4
+#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
+#define STM32H7_SPI_CFG2_COMM_SHIFT 17
+#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
+#define STM32H7_SPI_CFG2_SP_SHIFT 19
+#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
+#define STM32H7_SPI_CFG2_MASTER BIT(22)
+#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
+#define STM32H7_SPI_CFG2_CPHA BIT(24)
+#define STM32H7_SPI_CFG2_CPOL BIT(25)
+#define STM32H7_SPI_CFG2_SSM BIT(26)
+#define STM32H7_SPI_CFG2_AFCNTR BIT(31)
+
+/* STM32H7_SPI_IER bit fields */
+#define STM32H7_SPI_IER_RXPIE BIT(0)
+#define STM32H7_SPI_IER_TXPIE BIT(1)
+#define STM32H7_SPI_IER_DXPIE BIT(2)
+#define STM32H7_SPI_IER_EOTIE BIT(3)
+#define STM32H7_SPI_IER_TXTFIE BIT(4)
+#define STM32H7_SPI_IER_OVRIE BIT(6)
+#define STM32H7_SPI_IER_MODFIE BIT(9)
+#define STM32H7_SPI_IER_ALL GENMASK(10, 0)
+
+/* STM32H7_SPI_SR bit fields */
+#define STM32H7_SPI_SR_RXP BIT(0)
+#define STM32H7_SPI_SR_TXP BIT(1)
+#define STM32H7_SPI_SR_EOT BIT(3)
+#define STM32H7_SPI_SR_OVR BIT(6)
+#define STM32H7_SPI_SR_MODF BIT(9)
+#define STM32H7_SPI_SR_SUSP BIT(11)
+#define STM32H7_SPI_SR_RXPLVL_SHIFT 13
+#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
+#define STM32H7_SPI_SR_RXWNE BIT(15)
+
+/* STM32H7_SPI_IFCR bit fields */
+#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)
+
+/* STM32H7_SPI_I2SCFGR bit fields */
+#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
+
+/* STM32H7 SPI Master Baud Rate min/max divisor */
+#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
+#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
+
+/* STM32H7 SPI Communication mode */
+#define STM32H7_SPI_FULL_DUPLEX 0
+#define STM32H7_SPI_SIMPLEX_TX 1
+#define STM32H7_SPI_SIMPLEX_RX 2
+#define STM32H7_SPI_HALF_DUPLEX 3
+
+/* SPI Communication type */
#define SPI_FULL_DUPLEX 0
#define SPI_SIMPLEX_TX 1
#define SPI_SIMPLEX_RX 2
-#define SPI_HALF_DUPLEX 3
+#define SPI_3WIRE_TX 3
+#define SPI_3WIRE_RX 4
#define SPI_1HZ_NS 1000000000
+/*
+ * Use PIO for small transfers, avoiding DMA setup/teardown overhead for
+ * drivers without fifo buffers.
+ */
+#define SPI_DMA_MIN_BYTES 16
+
+/**
+ * stm32_spi_reg - stm32 SPI register & bitfield desc
+ * @reg: register offset
+ * @mask: bitfield mask
+ * @shift: left shift
+ */
+struct stm32_spi_reg {
+ int reg;
+ int mask;
+ int shift;
+};
+
+/**
+ * stm32_spi_regspec - stm32 registers definition, compatible dependent data
+ * @en: enable register and SPI enable bit
+ * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
+ * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
+ * @cpol: clock polarity register and polarity bit
+ * @cpha: clock phase register and phase bit
+ * @lsb_first: LSB transmitted first register and bit
+ * @br: baud rate register and bitfields
+ * @rx: SPI RX data register
+ * @tx: SPI TX data register
+ */
+struct stm32_spi_regspec {
+ const struct stm32_spi_reg en;
+ const struct stm32_spi_reg dma_rx_en;
+ const struct stm32_spi_reg dma_tx_en;
+ const struct stm32_spi_reg cpol;
+ const struct stm32_spi_reg cpha;
+ const struct stm32_spi_reg lsb_first;
+ const struct stm32_spi_reg br;
+ const struct stm32_spi_reg rx;
+ const struct stm32_spi_reg tx;
+};
+
+struct stm32_spi;
+
+/**
+ * stm32_spi_cfg - stm32 compatible configuration data
+ * @regs: registers descriptions
+ * @get_fifo_size: routine to get fifo size
+ * @get_bpw_mask: routine to get bits per word mask
+ * @disable: routine to disable controller
+ * @config: routine to configure controller as SPI Master
+ * @set_bpw: routine to configure registers for the desired bits per word
+ * @set_mode: routine to configure registers to desired mode
+ * @set_data_idleness: optional routine to configure registers to desired idle
+ * time between frames (if driver has this functionality)
+ * @set_number_of_data: optional routine to configure registers to desired
+ * number of data (if driver has this functionality)
+ * @can_dma: routine to determine if the transfer is eligible for DMA use
+ * @transfer_one_dma_start: routine to start transfer of a single spi_transfer
+ * using DMA
+ * @dma_rx_cb: routine to call after DMA RX channel operation is complete
+ * @dma_tx_cb: routine to call after DMA TX channel operation is complete
+ * @transfer_one_irq: routine to configure interrupts for driver
+ * @irq_handler_event: Interrupt handler for SPI controller events
+ * @irq_handler_thread: thread of interrupt handler for SPI controller
+ * @baud_rate_div_min: minimum baud rate divisor
+ * @baud_rate_div_max: maximum baud rate divisor
+ * @has_fifo: boolean to know if fifo is used for driver
+ */
+struct stm32_spi_cfg {
+ const struct stm32_spi_regspec *regs;
+ int (*get_fifo_size)(struct stm32_spi *spi);
+ int (*get_bpw_mask)(struct stm32_spi *spi);
+ void (*disable)(struct stm32_spi *spi);
+ int (*config)(struct stm32_spi *spi);
+ void (*set_bpw)(struct stm32_spi *spi);
+ int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
+ void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
+ int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
+ void (*transfer_one_dma_start)(struct stm32_spi *spi);
+ void (*dma_rx_cb)(void *data);
+ void (*dma_tx_cb)(void *data);
+ int (*transfer_one_irq)(struct stm32_spi *spi);
+ irqreturn_t (*irq_handler_event)(int irq, void *dev_id);
+ irqreturn_t (*irq_handler_thread)(int irq, void *dev_id);
+ unsigned int baud_rate_div_min;
+ unsigned int baud_rate_div_max;
+ bool has_fifo;
+};
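+
+/*
+ * Each supported compatible provides one such bundle of ops and limits.
+ * An entry might look roughly like this (illustrative sketch only, not
+ * taken verbatim from this patch):
+ *
+ *	static const struct stm32_spi_cfg stm32h7_spi_cfg = {
+ *		.regs = &stm32h7_spi_regspec,
+ *		.get_fifo_size = stm32h7_spi_get_fifo_size,
+ *		.get_bpw_mask = stm32h7_spi_get_bpw_mask,
+ *		.disable = stm32h7_spi_disable,
+ *		.baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
+ *		.baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
+ *		.has_fifo = true,
+ *	};
+ */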
+
/**
* struct stm32_spi - private data of the SPI controller
* @dev: driver model representation of the controller
* @master: controller master interface
+ * @cfg: compatible configuration data
* @base: virtual memory area
* @clk: hw kernel clock feeding the SPI clock generator
* @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
@@ -151,6 +290,7 @@
struct stm32_spi {
struct device *dev;
struct spi_master *master;
+ const struct stm32_spi_cfg *cfg;
void __iomem *base;
struct clk *clk;
u32 clk_rate;
@@ -176,6 +316,40 @@ struct stm32_spi {
dma_addr_t phys_addr;
};
+static const struct stm32_spi_regspec stm32f4_spi_regspec = {
+ .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
+ .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
+
+ .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
+ .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
+ .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
+ .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
+
+ .rx = { STM32F4_SPI_DR },
+ .tx = { STM32F4_SPI_DR },
+};
+
+static const struct stm32_spi_regspec stm32h7_spi_regspec = {
+ /*
+ * SPI data transfer is enabled but spi_ker_ck is idle.
+ * CFG1 and CFG2 registers are write protected when SPE is enabled.
+ */
+ .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
+ .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
+
+ .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
+ .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
+ .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
+ .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
+ STM32H7_SPI_CFG1_MBR_SHIFT },
+
+ .rx = { STM32H7_SPI_RXDR },
+ .tx = { STM32H7_SPI_TXDR },
+};
+
static inline void stm32_spi_set_bits(struct stm32_spi *spi,
u32 offset, u32 bits)
{
@@ -191,22 +365,22 @@ static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
}
/**
- * stm32_spi_get_fifo_size - Return fifo size
+ * stm32h7_spi_get_fifo_size - Return fifo size
* @spi: pointer to the spi controller data structure
*/
-static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
+static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi)
{
unsigned long flags;
u32 count = 0;
spin_lock_irqsave(&spi->lock, flags);
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
- while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)
- writeb_relaxed(++count, spi->base + STM32_SPI_TXDR);
+ while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
+ writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);
- stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -216,10 +390,20 @@ static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
}
/**
- * stm32_spi_get_bpw_mask - Return bits per word mask
+ * stm32f4_spi_get_bpw_mask - Return bits per word mask
* @spi: pointer to the spi controller data structure
*/
-static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
+static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
+ return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+}
+
+/**
+ * stm32h7_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
{
unsigned long flags;
u32 cfg1, max_bpw;
@@ -230,10 +414,11 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
* The most significant bit at DSIZE bit field is reserved when the
* maximum data size of periperal instances is limited to 16-bit
*/
- stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
- cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1);
- max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT;
+ cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
+ max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >>
+ STM32H7_SPI_CFG1_DSIZE_SHIFT;
max_bpw += 1;
spin_unlock_irqrestore(&spi->lock, flags);
@@ -244,13 +429,16 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
}
/**
- * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value
+ * stm32_spi_prepare_mbr - Determine baud rate divisor value
* @spi: pointer to the spi controller data structure
* @speed_hz: requested speed
+ * @min_div: minimum baud rate divisor
+ * @max_div: maximum baud rate divisor
*
- * Return SPI_CFG1.MBR value in case of success or -EINVAL
+ * Return baud rate divisor value in case of success or -EINVAL
*/
-static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
+static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ u32 min_div, u32 max_div)
{
u32 div, mbrdiv;
@@ -263,8 +451,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
* no need to check it there.
* However, we need to ensure the following calculations.
*/
- if (div < SPI_MBR_DIV_MIN ||
- div > SPI_MBR_DIV_MAX)
+ if ((div < min_div) || (div > max_div))
return -EINVAL;
/* Determine the first power of 2 greater than or equal to div */
@@ -279,10 +466,10 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
}
/**
- * stm32_spi_prepare_fthlv - Determine FIFO threshold level
+ * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
* @spi: pointer to the spi controller data structure
*/
-static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
{
u32 fthlv, half_fifo;
@@ -306,32 +493,62 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
}
/**
- * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
+ * stm32f4_spi_write_tx - Write bytes to Transmit Data Register
* @spi: pointer to the spi controller data structure
*
* Read from tx_buf depends on remaining bytes to avoid to read beyond
* tx_buf end.
*/
-static void stm32_spi_write_txfifo(struct stm32_spi *spi)
+static void stm32f4_spi_write_tx(struct stm32_spi *spi)
+{
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
+ STM32F4_SPI_SR_TXE)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->cur_bpw == 16) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * The amount read from tx_buf depends on the remaining bytes, to avoid
+ * reading beyond the end of tx_buf.
+ */
+static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
{
while ((spi->tx_len > 0) &&
- (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) {
+ (readl_relaxed(spi->base + STM32H7_SPI_SR) &
+ STM32H7_SPI_SR_TXP)) {
u32 offs = spi->cur_xferlen - spi->tx_len;
if (spi->tx_len >= sizeof(u32)) {
const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
- writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR);
+ writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u32);
} else if (spi->tx_len >= sizeof(u16)) {
const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
- writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR);
+ writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u16);
} else {
const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
- writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR);
+ writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
spi->tx_len -= sizeof(u8);
}
}
@@ -340,43 +557,74 @@ static void stm32_spi_write_txfifo(struct stm32_spi *spi)
}
/**
- * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
+ * stm32f4_spi_read_rx - Read bytes from Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * The amount written to rx_buf depends on the remaining bytes, to avoid
+ * writing beyond the end of rx_buf.
+ */
+static void stm32f4_spi_read_rx(struct stm32_spi *spi)
+{
+ if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
+ STM32F4_SPI_SR_RXNE)) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if (spi->cur_bpw == 16) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
+ spi->rx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len);
+}
+
+/**
+ * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
*
* Write in rx_buf depends on remaining bytes to avoid to write beyond
* rx_buf end.
*/
-static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
{
- u32 sr = readl_relaxed(spi->base + STM32_SPI_SR);
- u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+ u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
+ STM32H7_SPI_SR_RXPLVL_SHIFT;
while ((spi->rx_len > 0) &&
- ((sr & SPI_SR_RXP) ||
- (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
+ ((sr & STM32H7_SPI_SR_RXP) ||
+ (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if ((spi->rx_len >= sizeof(u32)) ||
- (flush && (sr & SPI_SR_RXWNE))) {
+ (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
- *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR);
+ *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u32);
} else if ((spi->rx_len >= sizeof(u16)) ||
(flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
- *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR);
+ *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u16);
} else {
u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
- *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR);
+ *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u8);
}
- sr = readl_relaxed(spi->base + STM32_SPI_SR);
- rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
+ STM32H7_SPI_SR_RXPLVL_SHIFT;
}
dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
@@ -386,26 +634,76 @@ static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
/**
* stm32_spi_enable - Enable SPI controller
* @spi: pointer to the spi controller data structure
- *
- * SPI data transfer is enabled but spi_ker_ck is idle.
- * SPI_CFG1 and SPI_CFG2 are now write protected.
*/
static void stm32_spi_enable(struct stm32_spi *spi)
{
dev_dbg(spi->dev, "enable controller\n");
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_set_bits(spi, spi->cfg->regs->en.reg,
+ spi->cfg->regs->en.mask);
}
/**
- * stm32_spi_disable - Disable SPI controller
+ * stm32f4_spi_disable - Disable SPI controller
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f4_spi_disable(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 sr;
+
+ dev_dbg(spi->dev, "disable controller\n");
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
+ STM32F4_SPI_CR1_SPE)) {
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return;
+ }
+
+ /* Disable interrupts */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
+ STM32F4_SPI_CR2_RXNEIE |
+ STM32F4_SPI_CR2_ERRIE);
+
+ /* Wait until BSY = 0 */
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
+ sr, !(sr & STM32F4_SPI_SR_BSY),
+ 10, 100000) < 0) {
+ dev_warn(spi->dev, "disabling condition timeout\n");
+ }
+
+ if (spi->cur_usedma && spi->dma_tx)
+ dmaengine_terminate_all(spi->dma_tx);
+ if (spi->cur_usedma && spi->dma_rx)
+ dmaengine_terminate_all(spi->dma_rx);
+
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
+
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
+ STM32F4_SPI_CR2_RXDMAEN);
+
+ /* Sequence to clear OVR flag */
+ readl_relaxed(spi->base + STM32F4_SPI_DR);
+ readl_relaxed(spi->base + STM32F4_SPI_SR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+}
+
+/**
+ * stm32h7_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*
* RX-Fifo is flushed when SPI controller is disabled. To prevent any data
- * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
+ * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
* RX-Fifo.
+ * Normally, if TSIZE has been configured, we should relax the hardware on
+ * reception of the EOT interrupt. But in case of error, EOT will not be
+ * raised, so the subsystem's unprepare_message call allows us to properly
+ * complete the transfer from a hardware point of view.
*/
-static void stm32_spi_disable(struct stm32_spi *spi)
+static void stm32h7_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr1, sr;
@@ -414,23 +712,23 @@ static void stm32_spi_disable(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
- cr1 = readl_relaxed(spi->base + STM32_SPI_CR1);
+ cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);
- if (!(cr1 & SPI_CR1_SPE)) {
+ if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
spin_unlock_irqrestore(&spi->lock, flags);
return;
}
/* Wait on EOT or suspend the flow */
- if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR,
- sr, !(sr & SPI_SR_EOT),
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
+ sr, !(sr & STM32H7_SPI_SR_EOT),
10, 100000) < 0) {
- if (cr1 & SPI_CR1_CSTART) {
- writel_relaxed(cr1 | SPI_CR1_CSUSP,
- spi->base + STM32_SPI_CR1);
+ if (cr1 & STM32H7_SPI_CR1_CSTART) {
+ writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
+ spi->base + STM32H7_SPI_CR1);
if (readl_relaxed_poll_timeout_atomic(
- spi->base + STM32_SPI_SR,
- sr, !(sr & SPI_SR_SUSP),
+ spi->base + STM32H7_SPI_SR,
+ sr, !(sr & STM32H7_SPI_SR_SUSP),
10, 100000) < 0)
dev_warn(spi->dev,
"Suspend request timeout\n");
@@ -438,21 +736,21 @@ static void stm32_spi_disable(struct stm32_spi *spi)
}
if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
- stm32_spi_read_rxfifo(spi, true);
+ stm32h7_spi_read_rxfifo(spi, true);
- if (spi->cur_usedma && spi->tx_buf)
+ if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_all(spi->dma_tx);
- if (spi->cur_usedma && spi->rx_buf)
+ if (spi->cur_usedma && spi->dma_rx)
dmaengine_terminate_all(spi->dma_rx);
- stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
- stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN |
- SPI_CFG1_RXDMAEN);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
+ STM32H7_SPI_CFG1_RXDMAEN);
/* Disable interrupts and clear status flags */
- writel_relaxed(0, spi->base + STM32_SPI_IER);
- writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR);
+ writel_relaxed(0, spi->base + STM32H7_SPI_IER);
+ writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
}
@@ -460,26 +758,136 @@ static void stm32_spi_disable(struct stm32_spi *spi)
/**
* stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
*
- * If the current transfer size is greater than fifo size, use DMA.
+ * If driver has fifo and the current transfer size is greater than fifo size,
+ * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes.
*/
static bool stm32_spi_can_dma(struct spi_master *master,
struct spi_device *spi_dev,
struct spi_transfer *transfer)
{
+ unsigned int dma_size;
struct stm32_spi *spi = spi_master_get_devdata(master);
+ if (spi->cfg->has_fifo)
+ dma_size = spi->fifo_size;
+ else
+ dma_size = SPI_DMA_MIN_BYTES;
+
dev_dbg(spi->dev, "%s: %s\n", __func__,
- (transfer->len > spi->fifo_size) ? "true" : "false");
+ (transfer->len > dma_size) ? "true" : "false");
+
+ return (transfer->len > dma_size);
+}
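For illustration, the eligibility check above reduces to a single threshold compare. The following standalone sketch restates it outside the driver; the function name, parameters and the SPI_DMA_MIN_BYTES value are assumptions of the example, not taken from this patch:

/* Hypothetical sketch of the DMA-eligibility decision shown above. */
#include <stdbool.h>

#define SPI_DMA_MIN_BYTES 16	/* assumed fallback threshold, for illustration only */

static bool xfer_wants_dma(unsigned int len, bool has_fifo,
			   unsigned int fifo_size)
{
	/* With a FIFO, DMA pays off past the FIFO depth; otherwise past a fixed minimum. */
	unsigned int dma_size = has_fifo ? fifo_size : SPI_DMA_MIN_BYTES;

	return len > dma_size;
}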
+
+/**
+ * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ u32 sr, mask = 0;
+ unsigned long flags;
+ bool end = false;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
+ /*
+ * The BSY flag is not handled in the interrupt handler, but it is normal
+ * for this flag to be set.
+ */
+ sr &= ~STM32F4_SPI_SR_BSY;
+
+ if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
+ spi->cur_comm == SPI_3WIRE_TX)) {
+ /* OVR and RXNE flags shouldn't be handled in TX only mode */
+ sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
+ mask |= STM32F4_SPI_SR_TXE;
+ }
+
+ if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) {
+ /* TXE flag is set and is handled when RXNE flag occurs */
+ sr &= ~STM32F4_SPI_SR_TXE;
+ mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
+ }
+
+ if (!(sr & mask)) {
+ dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (sr & STM32F4_SPI_SR_OVR) {
+ dev_warn(spi->dev, "Overrun: received value discarded\n");
+
+ /* Sequence to clear OVR flag */
+ readl_relaxed(spi->base + STM32F4_SPI_DR);
+ readl_relaxed(spi->base + STM32F4_SPI_SR);
+
+ /*
+ * If overrun is detected, something went wrong, so stop the current
+ * transfer. Otherwise the transfer would wait for the next RXNE, but
+ * DR has already been read and the end would never come.
+ */
+ end = true;
+ goto end_irq;
+ }
+
+ if (sr & STM32F4_SPI_SR_TXE) {
+ if (spi->tx_buf)
+ stm32f4_spi_write_tx(spi);
+ if (spi->tx_len == 0)
+ end = true;
+ }
+
+ if (sr & STM32F4_SPI_SR_RXNE) {
+ stm32f4_spi_read_rx(spi);
+ if (spi->rx_len == 0)
+ end = true;
+ else /* Load data for discontinuous mode */
+ stm32f4_spi_write_tx(spi);
+ }
+
+end_irq:
+ if (end) {
+ /* Immediately disable interrupts so as not to generate new ones */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
+ STM32F4_SPI_CR2_TXEIE |
+ STM32F4_SPI_CR2_RXNEIE |
+ STM32F4_SPI_CR2_ERRIE);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+
+ spi_finalize_current_transfer(master);
+ stm32f4_spi_disable(spi);
- return (transfer->len > spi->fifo_size);
+ return IRQ_HANDLED;
}
/**
- * stm32_spi_irq - Interrupt handler for SPI controller events
+ * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
* @dev_id: SPI controller master interface
*/
-static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
+static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct stm32_spi *spi = spi_master_get_devdata(master);
@@ -489,19 +897,19 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
spin_lock_irqsave(&spi->lock, flags);
- sr = readl_relaxed(spi->base + STM32_SPI_SR);
- ier = readl_relaxed(spi->base + STM32_SPI_IER);
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
+ ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
mask = ier;
/* EOTIE is triggered on EOT, SUSP and TXC events. */
- mask |= SPI_SR_SUSP;
+ mask |= STM32H7_SPI_SR_SUSP;
/*
* When TXTF is set, DXPIE and TXPIE are cleared. So in case of
* Full-Duplex, need to poll RXP event to know if there are remaining
* data, before disabling SPI.
*/
if (spi->rx_buf && !spi->cur_usedma)
- mask |= SPI_SR_RXP;
+ mask |= STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -510,10 +918,10 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
return IRQ_NONE;
}
- if (sr & SPI_SR_SUSP) {
+ if (sr & STM32H7_SPI_SR_SUSP) {
dev_warn(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, false);
+ stm32h7_spi_read_rxfifo(spi, false);
/*
* If communication is suspended while using DMA, it means
* that something went wrong, so stop the current transfer
@@ -522,15 +930,15 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
end = true;
}
- if (sr & SPI_SR_MODF) {
+ if (sr & STM32H7_SPI_SR_MODF) {
dev_warn(spi->dev, "Mode fault: transfer aborted\n");
end = true;
}
- if (sr & SPI_SR_OVR) {
+ if (sr & STM32H7_SPI_SR_OVR) {
dev_warn(spi->dev, "Overrun: received value discarded\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, false);
+ stm32h7_spi_read_rxfifo(spi, false);
/*
* If overrun is detected while using DMA, it means that
* something went wrong, so stop the current transfer
@@ -539,27 +947,27 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
end = true;
}
- if (sr & SPI_SR_EOT) {
+ if (sr & STM32H7_SPI_SR_EOT) {
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, true);
+ stm32h7_spi_read_rxfifo(spi, true);
end = true;
}
- if (sr & SPI_SR_TXP)
+ if (sr & STM32H7_SPI_SR_TXP)
if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
- stm32_spi_write_txfifo(spi);
+ stm32h7_spi_write_txfifo(spi);
- if (sr & SPI_SR_RXP)
+ if (sr & STM32H7_SPI_SR_RXP)
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
- stm32_spi_read_rxfifo(spi, false);
+ stm32h7_spi_read_rxfifo(spi, false);
- writel_relaxed(mask, spi->base + STM32_SPI_IFCR);
+ writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
spin_unlock_irqrestore(&spi->lock, flags);
if (end) {
spi_finalize_current_transfer(master);
- stm32_spi_disable(spi);
+ stm32h7_spi_disable(spi);
}
return IRQ_HANDLED;
@@ -598,7 +1006,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
struct spi_device *spi_dev = msg->spi;
struct device_node *np = spi_dev->dev.of_node;
unsigned long flags;
- u32 cfg2_clrb = 0, cfg2_setb = 0;
+ u32 clrb = 0, setb = 0;
/* SPI slave device may need time between data frames */
spi->cur_midi = 0;
@@ -606,19 +1014,19 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
if (spi_dev->mode & SPI_CPOL)
- cfg2_setb |= SPI_CFG2_CPOL;
+ setb |= spi->cfg->regs->cpol.mask;
else
- cfg2_clrb |= SPI_CFG2_CPOL;
+ clrb |= spi->cfg->regs->cpol.mask;
if (spi_dev->mode & SPI_CPHA)
- cfg2_setb |= SPI_CFG2_CPHA;
+ setb |= spi->cfg->regs->cpha.mask;
else
- cfg2_clrb |= SPI_CFG2_CPHA;
+ clrb |= spi->cfg->regs->cpha.mask;
if (spi_dev->mode & SPI_LSB_FIRST)
- cfg2_setb |= SPI_CFG2_LSBFRST;
+ setb |= spi->cfg->regs->lsb_first.mask;
else
- cfg2_clrb |= SPI_CFG2_LSBFRST;
+ clrb |= spi->cfg->regs->lsb_first.mask;
dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
spi_dev->mode & SPI_CPOL,
@@ -628,11 +1036,12 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
spin_lock_irqsave(&spi->lock, flags);
- if (cfg2_clrb || cfg2_setb)
+ /* CPOL, CPHA and LSB FIRST bits have common register */
+ if (clrb || setb)
writel_relaxed(
- (readl_relaxed(spi->base + STM32_SPI_CFG2) &
- ~cfg2_clrb) | cfg2_setb,
- spi->base + STM32_SPI_CFG2);
+ (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) &
+ ~clrb) | setb,
+ spi->base + spi->cfg->regs->cpol.reg);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -640,12 +1049,40 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
}
/**
- * stm32_spi_dma_cb - dma callback
+ * stm32f4_spi_dma_tx_cb - dma callback
+ *
+ * DMA callback is called when the transfer is complete for DMA TX channel.
+ */
+static void stm32f4_spi_dma_tx_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ spi_finalize_current_transfer(spi->master);
+ stm32f4_spi_disable(spi);
+ }
+}
+
+/**
+ * stm32f4_spi_dma_rx_cb - dma callback
+ *
+ * DMA callback is called when the transfer is complete for DMA RX channel.
+ */
+static void stm32f4_spi_dma_rx_cb(void *data)
+{
+ struct stm32_spi *spi = data;
+
+ spi_finalize_current_transfer(spi->master);
+ stm32f4_spi_disable(spi);
+}
+
+/**
+ * stm32h7_spi_dma_cb - dma callback
*
* DMA callback is called when the transfer is complete or when an error
* occurs. If the transfer is complete, EOT flag is raised.
*/
-static void stm32_spi_dma_cb(void *data)
+static void stm32h7_spi_dma_cb(void *data)
{
struct stm32_spi *spi = data;
unsigned long flags;
@@ -653,11 +1090,11 @@ static void stm32_spi_dma_cb(void *data)
spin_lock_irqsave(&spi->lock, flags);
- sr = readl_relaxed(spi->base + STM32_SPI_SR);
+ sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
spin_unlock_irqrestore(&spi->lock, flags);
- if (!(sr & SPI_SR_EOT))
+ if (!(sr & STM32H7_SPI_SR_EOT))
dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
/* Now wait for EOT, or SUSP or OVR in case of error */
@@ -681,23 +1118,27 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
- /* Valid for DMA Half or Full Fifo threshold */
- if (spi->cur_fthlv == 2)
+ if (spi->cfg->has_fifo) {
+ /* Valid for DMA Half or Full Fifo threshold */
+ if (spi->cur_fthlv == 2)
+ maxburst = 1;
+ else
+ maxburst = spi->cur_fthlv;
+ } else {
maxburst = 1;
- else
- maxburst = spi->cur_fthlv;
+ }
memset(dma_conf, 0, sizeof(struct dma_slave_config));
dma_conf->direction = dir;
if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
- dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR;
+ dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg;
dma_conf->src_addr_width = buswidth;
dma_conf->src_maxburst = maxburst;
dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
buswidth, maxburst);
} else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
- dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR;
+ dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg;
dma_conf->dst_addr_width = buswidth;
dma_conf->dst_maxburst = maxburst;
@@ -707,27 +1148,68 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
}
/**
- * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
- * interrupts
+ * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
*/
-static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
+static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
+{
+ unsigned long flags;
+ u32 cr2 = 0;
+
+ /* Enable the interrupts relative to the current communication mode */
+ if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ cr2 |= STM32F4_SPI_CR2_TXEIE;
+ } else if (spi->cur_comm == SPI_FULL_DUPLEX) {
+ /* In transmit-only mode, the OVR flag is set in the SR register
+ * since the received data are never read. Therefore set OVR
+ * interrupt only when rx buffer is available.
+ */
+ cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
+ } else {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
+
+ stm32_spi_enable(spi);
+
+ /* Start the data transfer once the buffer is loaded */
+ if (spi->tx_buf)
+ stm32f4_spi_write_tx(spi);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return 1;
+}
+
+/**
+ * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
+ * interrupts
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
{
unsigned long flags;
u32 ier = 0;
/* Enable the interrupts relative to the current communication mode */
if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
- ier |= SPI_IER_DXPIE;
+ ier |= STM32H7_SPI_IER_DXPIE;
else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
- ier |= SPI_IER_TXPIE;
+ ier |= STM32H7_SPI_IER_TXPIE;
else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
- ier |= SPI_IER_RXPIE;
+ ier |= STM32H7_SPI_IER_RXPIE;
/* Enable the interrupts relative to the end of transfer */
- ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
+ ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
+ STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
spin_lock_irqsave(&spi->lock, flags);
@@ -735,11 +1217,11 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
- stm32_spi_write_txfifo(spi);
+ stm32h7_spi_write_txfifo(spi);
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
- writel_relaxed(ier, spi->base + STM32_SPI_IER);
+ writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -747,6 +1229,43 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
}
/**
+ * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ */
+static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* In DMA mode end of transfer is handled by DMA TX or RX callback. */
+ if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
+ spi->cur_comm == SPI_FULL_DUPLEX) {
+ /*
+ * In transmit-only mode, the OVR flag is set in the SR register
+ * since the received data are never read. Therefore set OVR
+ * interrupt only when rx buffer is available.
+ */
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
+ }
+
+ stm32_spi_enable(spi);
+}
+
+/**
+ * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ */
+static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* Enable the interrupts relative to the end of transfer */
+ stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
+ STM32H7_SPI_IER_TXTFIE |
+ STM32H7_SPI_IER_OVRIE |
+ STM32H7_SPI_IER_MODFIE);
+
+ stm32_spi_enable(spi);
+
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+}
+
+/**
* stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
@@ -758,17 +1277,17 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
struct dma_slave_config tx_dma_conf, rx_dma_conf;
struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
unsigned long flags;
- u32 ier = 0;
spin_lock_irqsave(&spi->lock, flags);
rx_dma_desc = NULL;
- if (spi->rx_buf) {
+ if (spi->rx_buf && spi->dma_rx) {
stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
/* Enable Rx DMA request */
- stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+ stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg,
+ spi->cfg->regs->dma_rx_en.mask);
rx_dma_desc = dmaengine_prep_slave_sg(
spi->dma_rx, xfer->rx_sg.sgl,
@@ -778,7 +1297,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
}
tx_dma_desc = NULL;
- if (spi->tx_buf) {
+ if (spi->tx_buf && spi->dma_tx) {
stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
@@ -789,12 +1308,15 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
DMA_PREP_INTERRUPT);
}
- if ((spi->tx_buf && !tx_dma_desc) ||
- (spi->rx_buf && !rx_dma_desc))
+ if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
+ (spi->rx_buf && spi->dma_rx && !rx_dma_desc))
+ goto dma_desc_error;
+
+ if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
goto dma_desc_error;
if (rx_dma_desc) {
- rx_dma_desc->callback = stm32_spi_dma_cb;
+ rx_dma_desc->callback = spi->cfg->dma_rx_cb;
rx_dma_desc->callback_param = spi;
if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
@@ -806,8 +1328,9 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
}
if (tx_dma_desc) {
- if (spi->cur_comm == SPI_SIMPLEX_TX) {
- tx_dma_desc->callback = stm32_spi_dma_cb;
+ if (spi->cur_comm == SPI_SIMPLEX_TX ||
+ spi->cur_comm == SPI_3WIRE_TX) {
+ tx_dma_desc->callback = spi->cfg->dma_tx_cb;
tx_dma_desc->callback_param = spi;
}
@@ -819,130 +1342,278 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
dma_async_issue_pending(spi->dma_tx);
/* Enable Tx DMA request */
- stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN);
+ stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg,
+ spi->cfg->regs->dma_tx_en.mask);
}
- /* Enable the interrupts relative to the end of transfer */
- ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
- writel_relaxed(ier, spi->base + STM32_SPI_IER);
-
- stm32_spi_enable(spi);
-
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+ spi->cfg->transfer_one_dma_start(spi);
spin_unlock_irqrestore(&spi->lock, flags);
return 1;
dma_submit_error:
- if (spi->rx_buf)
+ if (spi->dma_rx)
dmaengine_terminate_all(spi->dma_rx);
dma_desc_error:
- stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+ stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
+ spi->cfg->regs->dma_rx_en.mask);
spin_unlock_irqrestore(&spi->lock, flags);
dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
- return stm32_spi_transfer_one_irq(spi);
+ spi->cur_usedma = false;
+ return spi->cfg->transfer_one_irq(spi);
}
/**
- * stm32_spi_transfer_one_setup - common setup to transfer a single
- * spi_transfer either using DMA or
- * interrupts.
+ * stm32f4_spi_set_bpw - Configure bits per word
+ * @spi: pointer to the spi controller data structure
*/
-static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
- struct spi_device *spi_dev,
- struct spi_transfer *transfer)
+static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
{
- unsigned long flags;
- u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0;
- u32 mode, nb_words;
- int ret = 0;
-
- spin_lock_irqsave(&spi->lock, flags);
+ if (spi->cur_bpw == 16)
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ else
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+}
- if (spi->cur_bpw != transfer->bits_per_word) {
- u32 bpw, fthlv;
+/**
+ * stm32h7_spi_set_bpw - configure bits per word
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
+{
+ u32 bpw, fthlv;
+ u32 cfg1_clrb = 0, cfg1_setb = 0;
- spi->cur_bpw = transfer->bits_per_word;
- bpw = spi->cur_bpw - 1;
+ bpw = spi->cur_bpw - 1;
- cfg1_clrb |= SPI_CFG1_DSIZE;
- cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE;
+ cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
+ cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
+ STM32H7_SPI_CFG1_DSIZE;
- spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
- fthlv = spi->cur_fthlv - 1;
+ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
+ fthlv = spi->cur_fthlv - 1;
- cfg1_clrb |= SPI_CFG1_FTHLV;
- cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV;
- }
+ cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
+ cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) &
+ STM32H7_SPI_CFG1_FTHLV;
- if (spi->cur_speed != transfer->speed_hz) {
- int mbr;
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
+ ~cfg1_clrb) | cfg1_setb,
+ spi->base + STM32H7_SPI_CFG1);
+}
- /* Update spi->cur_speed with real clock speed */
- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz);
- if (mbr < 0) {
- ret = mbr;
- goto out;
- }
+/**
+ * stm32_spi_set_mbr - Configure baud rate divisor in master mode
+ * @spi: pointer to the spi controller data structure
+ * @mbrdiv: baud rate divisor value
+ */
+static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
+{
+ u32 clrb = 0, setb = 0;
- transfer->speed_hz = spi->cur_speed;
+ clrb |= spi->cfg->regs->br.mask;
+ setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) &
+ spi->cfg->regs->br.mask;
- cfg1_clrb |= SPI_CFG1_MBR;
- cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR;
- }
+ writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
+ ~clrb) | setb,
+ spi->base + spi->cfg->regs->br.reg);
+}
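The baud-rate update above, like the cpol/cpha/lsb_first handling in stm32_spi_prepare_msg, is one read-modify-write over a field described by the per-variant register specification. A standalone sketch of that pattern, with hypothetical names:

/* Hypothetical sketch of the {mask, shift} field update used by the regspec accessors. */
#include <stdint.h>

struct field_spec {
	uint32_t mask;	/* bits occupied by the field */
	uint32_t shift;	/* position of the field's least significant bit */
};

static uint32_t field_update(uint32_t reg_val, struct field_spec f, uint32_t val)
{
	/* Clear the field, then OR in the new value shifted into place and masked to width. */
	return (reg_val & ~f.mask) | ((val << f.shift) & f.mask);
}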
- if (cfg1_clrb || cfg1_setb)
- writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) &
- ~cfg1_clrb) | cfg1_setb,
- spi->base + STM32_SPI_CFG1);
+/**
+ * stm32_spi_communication_type - return transfer communication type
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
+ */
+static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned int type = SPI_FULL_DUPLEX;
- mode = SPI_FULL_DUPLEX;
if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
/*
* SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL
- * is forbidden und unvalidated by SPI subsystem so depending
+ * is forbidden and unvalidated by SPI subsystem so depending
* on the valid buffer, we can determine the direction of the
* transfer.
*/
- mode = SPI_HALF_DUPLEX;
if (!transfer->tx_buf)
- stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
- else if (!transfer->rx_buf)
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
+ type = SPI_3WIRE_RX;
+ else
+ type = SPI_3WIRE_TX;
} else {
if (!transfer->tx_buf)
- mode = SPI_SIMPLEX_RX;
+ type = SPI_SIMPLEX_RX;
else if (!transfer->rx_buf)
- mode = SPI_SIMPLEX_TX;
+ type = SPI_SIMPLEX_TX;
+ }
+
+ return type;
+}
+
+/**
+ * stm32f4_spi_set_mode - configure communication mode
+ * @spi: pointer to the spi controller data structure
+ * @comm_type: type of communication to configure
+ */
+static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+{
+ if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE |
+ STM32F4_SPI_CR1_BIDIOE);
+ } else if (comm_type == SPI_FULL_DUPLEX) {
+ stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
+ STM32F4_SPI_CR1_BIDIMODE |
+ STM32F4_SPI_CR1_BIDIOE);
+ } else {
+ return -EINVAL;
}
- if (spi->cur_comm != mode) {
- spi->cur_comm = mode;
- cfg2_clrb |= SPI_CFG2_COMM;
- cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM;
+ return 0;
+}
+
+/**
+ * stm32h7_spi_set_mode - configure communication mode
+ * @spi: pointer to the spi controller data structure
+ * @comm_type: type of communication to configure
+ */
+static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+{
+ u32 mode;
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ if (comm_type == SPI_3WIRE_RX) {
+ mode = STM32H7_SPI_HALF_DUPLEX;
+ stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
+ } else if (comm_type == SPI_3WIRE_TX) {
+ mode = STM32H7_SPI_HALF_DUPLEX;
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
+ } else if (comm_type == SPI_SIMPLEX_RX) {
+ mode = STM32H7_SPI_SIMPLEX_RX;
+ } else if (comm_type == SPI_SIMPLEX_TX) {
+ mode = STM32H7_SPI_SIMPLEX_TX;
+ } else {
+ mode = STM32H7_SPI_FULL_DUPLEX;
}
- cfg2_clrb |= SPI_CFG2_MIDI;
- if ((transfer->len > 1) && (spi->cur_midi > 0)) {
+ cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
+ cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) &
+ STM32H7_SPI_CFG2_COMM;
+
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32H7_SPI_CFG2);
+
+ return 0;
+}
+
+/**
+ * stm32h7_spi_data_idleness - configure minimum time delay inserted between two
+ * consecutive data frames in master mode
+ * @spi: pointer to the spi controller data structure
+ * @len: transfer length
+ */
+static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
+{
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
+ if ((len > 1) && (spi->cur_midi > 0)) {
u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
- (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT);
+ (u32)STM32H7_SPI_CFG2_MIDI >>
+ STM32H7_SPI_CFG2_MIDI_SHIFT);
dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
sck_period_ns, midi, midi * sck_period_ns);
+ cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) &
+ STM32H7_SPI_CFG2_MIDI;
+ }
+
+ writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
+ ~cfg2_clrb) | cfg2_setb,
+ spi->base + STM32H7_SPI_CFG2);
+}
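As a worked example of the computation above, the requested delay is converted to SCK cycles and clamped to the MIDI field. The numbers, SPI_1HZ_NS and the field maximum are assumptions of this sketch, not quoted from the header:

/* Hypothetical sketch: SCK cycles of idleness inserted between data frames. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SPI_1HZ_NS		1000000000U
#define MIDI_MAX		15U	/* assumed maximum value of the MIDI field */

int main(void)
{
	unsigned int speed_hz = 10000000;	/* 10 MHz SCK */
	unsigned int cur_midi = 250;		/* 250 ns requested inter-data delay */
	unsigned int period_ns = DIV_ROUND_UP(SPI_1HZ_NS, speed_hz);
	unsigned int midi = DIV_ROUND_UP(cur_midi, period_ns);

	if (midi > MIDI_MAX)
		midi = MIDI_MAX;

	/* A 100 ns period gives 3 cycles of idleness, i.e. 300 ns programmed. */
	printf("period=%uns, midi=%u(=%uns)\n", period_ns, midi, midi * period_ns);
	return 0;
}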
+
+/**
+ * stm32h7_spi_number_of_data - configure number of data words for the current transfer
+ * @spi: pointer to the spi controller data structure
+ * @nb_words: number of words to transfer
+ */
+static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
+{
+ u32 cr2_clrb = 0, cr2_setb = 0;
+
+ if (nb_words <= (STM32H7_SPI_CR2_TSIZE >>
+ STM32H7_SPI_CR2_TSIZE_SHIFT)) {
+ cr2_clrb |= STM32H7_SPI_CR2_TSIZE;
+ cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT;
+ writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) &
+ ~cr2_clrb) | cr2_setb,
+ spi->base + STM32H7_SPI_CR2);
+ } else {
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
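The word count programmed here is derived by the caller from the transfer length and the current word size (see stm32_spi_transfer_one_setup further down), then bounded by TSIZE. A small standalone sketch of that conversion with assumed values:

/* Hypothetical sketch of the bytes-to-words conversion that feeds TSIZE. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 5, bpw = 16;		/* 5 bytes at 16 bits per word */
	unsigned int nb_words;

	if (bpw <= 8)
		nb_words = len;
	else if (bpw <= 16)
		nb_words = DIV_ROUND_UP(len * 8, 16);
	else
		nb_words = DIV_ROUND_UP(len * 8, 32);

	printf("%u words\n", nb_words);		/* prints "3 words" */
	return 0;
}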
+
+/**
+ * stm32_spi_transfer_one_setup - common setup to transfer a single
+ * spi_transfer either using DMA or
+ * interrupts.
+ */
+static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+ struct spi_device *spi_dev,
+ struct spi_transfer *transfer)
+{
+ unsigned long flags;
+ unsigned int comm_type;
+ int nb_words, ret = 0;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ if (spi->cur_bpw != transfer->bits_per_word) {
+ spi->cur_bpw = transfer->bits_per_word;
+ spi->cfg->set_bpw(spi);
+ }
- cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI;
+ if (spi->cur_speed != transfer->speed_hz) {
+ int mbr;
+
+ /* Update spi->cur_speed with real clock speed */
+ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+ spi->cfg->baud_rate_div_min,
+ spi->cfg->baud_rate_div_max);
+ if (mbr < 0) {
+ ret = mbr;
+ goto out;
+ }
+
+ transfer->speed_hz = spi->cur_speed;
+ stm32_spi_set_mbr(spi, mbr);
}
- if (cfg2_clrb || cfg2_setb)
- writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) &
- ~cfg2_clrb) | cfg2_setb,
- spi->base + STM32_SPI_CFG2);
+ comm_type = stm32_spi_communication_type(spi_dev, transfer);
+ if (spi->cur_comm != comm_type) {
+ ret = spi->cfg->set_mode(spi, comm_type);
+
+ if (ret < 0)
+ goto out;
+
+ spi->cur_comm = comm_type;
+ }
+
+ if (spi->cfg->set_data_idleness)
+ spi->cfg->set_data_idleness(spi, transfer->len);
if (spi->cur_bpw <= 8)
nb_words = transfer->len;
@@ -950,13 +1621,11 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
else
nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
- nb_words <<= SPI_CR2_TSIZE_SHIFT;
- if (nb_words <= SPI_CR2_TSIZE) {
- writel_relaxed(nb_words, spi->base + STM32_SPI_CR2);
- } else {
- ret = -EMSGSIZE;
- goto out;
+ if (spi->cfg->set_number_of_data) {
+ ret = spi->cfg->set_number_of_data(spi, nb_words);
+ if (ret < 0)
+ goto out;
}
spi->cur_xferlen = transfer->len;
@@ -997,7 +1666,7 @@ static int stm32_spi_transfer_one(struct spi_master *master,
spi->rx_len = spi->rx_buf ? transfer->len : 0;
spi->cur_usedma = (master->can_dma &&
- stm32_spi_can_dma(master, spi_dev, transfer));
+ master->can_dma(master, spi_dev, transfer));
ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
if (ret) {
@@ -1008,47 +1677,73 @@ static int stm32_spi_transfer_one(struct spi_master *master,
if (spi->cur_usedma)
return stm32_spi_transfer_one_dma(spi, transfer);
else
- return stm32_spi_transfer_one_irq(spi);
+ return spi->cfg->transfer_one_irq(spi);
}
/**
* stm32_spi_unprepare_msg - relax the hardware
- *
- * Normally, if TSIZE has been configured, we should relax the hardware at the
- * reception of the EOT interrupt. But in case of error, EOT will not be
- * raised. So the subsystem unprepare_message call allows us to properly
- * complete the transfer from an hardware point of view.
*/
static int stm32_spi_unprepare_msg(struct spi_master *master,
struct spi_message *msg)
{
struct stm32_spi *spi = spi_master_get_devdata(master);
- stm32_spi_disable(spi);
+ spi->cfg->disable(spi);
+
+ return 0;
+}
+
+/**
+ * stm32f4_spi_config - Configure SPI controller as SPI master
+ */
+static int stm32f4_spi_config(struct stm32_spi *spi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+ /* Ensure I2SMOD bit is kept cleared */
+ stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
+ STM32F4_SPI_I2SCFGR_I2SMOD);
+
+ /*
+ * - SS input value high
+ * - transmitter half duplex direction
+ * - Set the master mode (default Motorola mode)
+ * - Consider 1 master/n slaves configuration and
+ * SS input value is determined by the SSI bit
+ */
+ stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
+ STM32F4_SPI_CR1_BIDIOE |
+ STM32F4_SPI_CR1_MSTR |
+ STM32F4_SPI_CR1_SSM);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
/**
- * stm32_spi_config - Configure SPI controller as SPI master
+ * stm32h7_spi_config - Configure SPI controller as SPI master
*/
-static int stm32_spi_config(struct stm32_spi *spi)
+static int stm32h7_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
/* Ensure I2SMOD bit is kept cleared */
- stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
+ stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
+ STM32H7_SPI_I2SCFGR_I2SMOD);
/*
* - SS input value high
* - transmitter half duplex direction
* - automatic communication suspend when RX-Fifo is full
*/
- stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI |
- SPI_CR1_HDDIR |
- SPI_CR1_MASRX);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
+ STM32H7_SPI_CR1_HDDIR |
+ STM32H7_SPI_CR1_MASRX);
/*
* - Set the master mode (default Motorola mode)
@@ -1056,17 +1751,56 @@ static int stm32_spi_config(struct stm32_spi *spi)
* SS input value is determined by the SSI bit
* - keep control of all associated GPIOs
*/
- stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER |
- SPI_CFG2_SSM |
- SPI_CFG2_AFCNTR);
+ stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
+ STM32H7_SPI_CFG2_SSM |
+ STM32H7_SPI_CFG2_AFCNTR);
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
}
+static const struct stm32_spi_cfg stm32f4_spi_cfg = {
+ .regs = &stm32f4_spi_regspec,
+ .get_bpw_mask = stm32f4_spi_get_bpw_mask,
+ .disable = stm32f4_spi_disable,
+ .config = stm32f4_spi_config,
+ .set_bpw = stm32f4_spi_set_bpw,
+ .set_mode = stm32f4_spi_set_mode,
+ .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32f4_spi_dma_tx_cb,
+ .dma_rx_cb = stm32f4_spi_dma_rx_cb,
+ .transfer_one_irq = stm32f4_spi_transfer_one_irq,
+ .irq_handler_event = stm32f4_spi_irq_event,
+ .irq_handler_thread = stm32f4_spi_irq_thread,
+ .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
+ .has_fifo = false,
+};
+
+static const struct stm32_spi_cfg stm32h7_spi_cfg = {
+ .regs = &stm32h7_spi_regspec,
+ .get_fifo_size = stm32h7_spi_get_fifo_size,
+ .get_bpw_mask = stm32h7_spi_get_bpw_mask,
+ .disable = stm32h7_spi_disable,
+ .config = stm32h7_spi_config,
+ .set_bpw = stm32h7_spi_set_bpw,
+ .set_mode = stm32h7_spi_set_mode,
+ .set_data_idleness = stm32h7_spi_data_idleness,
+ .set_number_of_data = stm32h7_spi_number_of_data,
+ .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
+ .dma_rx_cb = stm32h7_spi_dma_cb,
+ .dma_tx_cb = stm32h7_spi_dma_cb,
+ .transfer_one_irq = stm32h7_spi_transfer_one_irq,
+ .irq_handler_thread = stm32h7_spi_irq_thread,
+ .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
+ .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
+ .has_fifo = true,
+};
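With these two tables, every chip-specific step is reached through spi->cfg, selected by compatible string in probe below. A minimal standalone sketch of the ops-table pattern, with hypothetical names:

/* Hypothetical sketch of the per-variant ops-table dispatch used by the driver. */
#include <stdbool.h>
#include <stdio.h>

struct variant_cfg {
	const char *name;
	int (*config)(void);	/* chip-specific controller setup */
	bool has_fifo;
};

static int f4_config(void) { puts("configure F4 core"); return 0; }
static int h7_config(void) { puts("configure H7 core"); return 0; }

static const struct variant_cfg f4_cfg = { "stm32f4", f4_config, false };
static const struct variant_cfg h7_cfg = { "stm32h7", h7_config, true };

int main(void)
{
	/* In the driver this pointer comes from the matched of_device_id data. */
	const struct variant_cfg *cfg = &h7_cfg;

	(void)f4_cfg;	/* the other variant, unused in this sketch */
	printf("%s, fifo=%d\n", cfg->name, cfg->has_fifo);
	return cfg->config();
}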
+
static const struct of_device_id stm32_spi_of_match[] = {
- { .compatible = "st,stm32h7-spi", },
+ { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
+ { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
@@ -1090,12 +1824,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->master = master;
spin_lock_init(&spi->lock);
+ spi->cfg = (const struct stm32_spi_cfg *)
+ of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev)->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(spi->base)) {
ret = PTR_ERR(spi->base);
goto err_master_put;
}
+
spi->phys_addr = (dma_addr_t)res->start;
spi->irq = platform_get_irq(pdev, 0);
@@ -1104,16 +1843,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
ret = -ENOENT;
goto err_master_put;
}
- ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
- stm32_spi_irq, IRQF_ONESHOT,
- pdev->name, master);
+ ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
+ spi->cfg->irq_handler_event,
+ spi->cfg->irq_handler_thread,
+ IRQF_ONESHOT, pdev->name, master);
if (ret) {
dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
ret);
goto err_master_put;
}
- spi->clk = devm_clk_get(&pdev->dev, 0);
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
ret = PTR_ERR(spi->clk);
dev_err(&pdev->dev, "clk get failed: %d\n", ret);
@@ -1139,9 +1879,10 @@ static int stm32_spi_probe(struct platform_device *pdev)
reset_control_deassert(spi->rst);
}
- spi->fifo_size = stm32_spi_get_fifo_size(spi);
+ if (spi->cfg->has_fifo)
+ spi->fifo_size = spi->cfg->get_fifo_size(spi);
- ret = stm32_spi_config(spi);
+ ret = spi->cfg->config(spi);
if (ret) {
dev_err(&pdev->dev, "controller configuration failed: %d\n",
ret);
@@ -1151,11 +1892,11 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
- master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST |
- SPI_3WIRE | SPI_LOOP;
- master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
- master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN;
- master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_3WIRE;
+ master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
+ master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
+ master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
master->setup = stm32_spi_setup;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
@@ -1233,7 +1974,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct stm32_spi *spi = spi_master_get_devdata(master);
- stm32_spi_disable(spi);
+ spi->cfg->disable(spi);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 5f19016bbf10..b9fb6493cd6b 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base) {
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
- MEM_CS_EN(spi->chip_select),
- MEM_CS_MASK);
+ MEM_CS_MASK,
+ MEM_CS_EN(spi->chip_select));
}
qspi->mmap_enabled = true;
}
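The two ti-qspi hunks here correct the argument order of regmap_update_bits(map, reg, mask, val): with mask and value swapped, the disable path below passes a zero mask and changes nothing. A standalone sketch of the underlying read-modify-write shows why the order matters (values are illustrative):

/* Hypothetical sketch of update_bits() semantics: new = (old & ~mask) | (val & mask). */
#include <stdint.h>
#include <stdio.h>

static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	/* Clearing a 3-bit chip-select field (mask 0x70) that currently holds 0x20. */
	printf("correct: 0x%02x\n", update_bits(0x20, 0x70, 0x00)); /* 0x00, field cleared */
	printf("swapped: 0x%02x\n", update_bits(0x20, 0x00, 0x70)); /* 0x20, register unchanged */
	return 0;
}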
@@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base)
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
- 0, MEM_CS_MASK);
+ MEM_CS_MASK, 0);
qspi->mmap_enabled = false;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 97d137591b18..e7e8ea1edcce 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1008,6 +1008,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* RX */
dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
+ if (!dma->sg_rx_p)
+ return;
+
sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_rx_p;
@@ -1068,6 +1071,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
+ if (!dma->sg_tx_p)
+ return;
+
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
sg = dma->sg_tx_p;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9a7def7c3237..93986f879b09 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -19,6 +19,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
@@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
- if (ctlr->cs_gpios)
+ /* Descriptors take precedence */
+ if (ctlr->cs_gpiods)
+ spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+ else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
@@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (gpio_is_valid(spi->cs_gpio)) {
- /* Honour the SPI_NO_CS flag */
- if (!(spi->mode & SPI_NO_CS))
- gpio_set_value(spi->cs_gpio, !enable);
+ if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+ /*
+ * Honour the SPI_NO_CS flag and invert the enable line, as
+ * active low is default for SPI. Execution paths that handle
+ * polarity inversion in gpiolib (such as device tree) will
+ * enforce active high using the SPI_CS_HIGH flag, resulting in a
+ * double inversion through the code above.
+ */
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (spi->cs_gpiod)
+ gpiod_set_value_cansleep(spi->cs_gpiod,
+ !enable);
+ else
+ gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
spi->controller->set_cs)
@@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
- if (of_property_read_bool(nc, "spi-cs-high"))
- spi->mode |= SPI_CS_HIGH;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
+ /*
+ * For descriptors associated with the device, polarity inversion is
+ * handled in the gpiolib, so all chip selects are "active high" in
+ * the logical sense; the gpiolib will invert the line if need be.
+ */
+ if (ctlr->use_gpio_descriptors)
+ spi->mode |= SPI_CS_HIGH;
+ else if (of_property_read_bool(nc, "spi-cs-high"))
+ spi->mode |= SPI_CS_HIGH;
+
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
switch (value) {
@@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr)
}
#endif
+/**
+ * spi_get_gpio_descs() - grab chip select GPIOs for the master
+ * @ctlr: The SPI master to grab GPIO descriptors for
+ */
+static int spi_get_gpio_descs(struct spi_controller *ctlr)
+{
+ int nb, i;
+ struct gpio_desc **cs;
+ struct device *dev = &ctlr->dev;
+
+ nb = gpiod_count(dev, "cs");
+ ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
+
+ /* No GPIOs at all is fine, else return the error */
+ if (nb == 0 || nb == -ENOENT)
+ return 0;
+ else if (nb < 0)
+ return nb;
+
+ cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
+ GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ ctlr->cs_gpiods = cs;
+
+ for (i = 0; i < nb; i++) {
+ /*
+ * Most chipselects are active low, and the inverted
+ * semantics are handled by special quirks in gpiolib,
+ * so initializing them to GPIOD_OUT_LOW here means
+ * "unasserted"; in most cases this will drive the
+ * physical line high.
+ */
+ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
+ GPIOD_OUT_LOW);
+
+ if (cs[i]) {
+ /*
+ * If we find a CS GPIO, name it after the device and
+ * chip select line.
+ */
+ char *gpioname;
+
+ gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
+ dev_name(dev), i);
+ if (!gpioname)
+ return -ENOMEM;
+ gpiod_set_consumer_name(cs[i], gpioname);
+ }
+ }
+
+ return 0;
+}
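A hedged usage sketch, not part of this patch: a controller driver opts in to the descriptor path by setting use_gpio_descriptors before registering, after which the core calls spi_get_gpio_descs() and toggles the lines through cs_gpiods in spi_set_cs():

	/* Sketch only: enabling GPIO-descriptor chip selects in a controller driver. */
	ctlr->use_gpio_descriptors = true;	/* core will fetch "cs-gpios" as descriptors */
	ret = devm_spi_register_controller(dev, ctlr);
	if (ret)
		return ret;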
+
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
/*
@@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr)
return status;
if (!spi_controller_is_slave(ctlr)) {
- status = of_spi_register_master(ctlr);
- if (status)
- return status;
+ if (ctlr->use_gpio_descriptors) {
+ status = spi_get_gpio_descs(ctlr);
+ if (status)
+ return status;
+ /*
+ * A controller using GPIO descriptors always
+ * supports SPI_CS_HIGH if need be.
+ */
+ ctlr->mode_bits |= SPI_CS_HIGH;
+ } else {
+ /* Legacy code path for GPIOs from DT */
+ status = of_spi_register_master(ctlr);
+ if (status)
+ return status;
+ }
}
/* even if it's just one always-selected device, there must
@@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+ spi->cs_gpiod ||
gpio_is_valid(spi->cs_gpio))) {
size_t maxsize;
int ret;
@@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
* (SPI_NBITS_SINGLE) if it is not set for this transfer.
+ * Ensure transfer word_delay is at least as long as that required by
+ * device itself.
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
@@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
}
+
+ if (xfer->word_delay_usecs < spi->word_delay_usecs)
+ xfer->word_delay_usecs = spi->word_delay_usecs;
}
message->status = -EINPROGRESS;
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index e14d7cc411c9..73b87da15eb2 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -137,7 +137,7 @@ static int isFileReadable(char *path)
ret = PTR_ERR(fp);
}
else {
- oldfs = get_fs(); set_fs(get_ds());
+ oldfs = get_fs(); set_fs(KERNEL_DS);
if (1!=readFile(fp, &buf, 1))
ret = -EINVAL;
@@ -165,7 +165,7 @@ static int retriveFromFile(char *path, u8 *buf, u32 sz)
if (0 == (ret =openFile(&fp, path, O_RDONLY, 0))) {
DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp);
- oldfs = get_fs(); set_fs(get_ds());
+ oldfs = get_fs(); set_fs(KERNEL_DS);
ret =readFile(fp, buf, sz);
set_fs(oldfs);
closeFile(fp);
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index e5efce3c08e2..947f9b28de9e 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -699,8 +699,10 @@ static int __init optee_driver_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, optee_match);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
optee = optee_probe(np);
of_node_put(np);
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index 0212f0ee8aea..b96fedc77ee5 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -60,9 +60,9 @@ struct mdev_device *mdev_from_dev(struct device *dev)
}
EXPORT_SYMBOL(mdev_from_dev);
-uuid_le mdev_uuid(struct mdev_device *mdev)
+const guid_t *mdev_uuid(struct mdev_device *mdev)
{
- return mdev->uuid;
+ return &mdev->uuid;
}
EXPORT_SYMBOL(mdev_uuid);
@@ -88,8 +88,7 @@ static void mdev_release_parent(struct kref *kref)
put_device(dev);
}
-static
-inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
+static inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
{
if (parent)
kref_get(&parent->ref);
@@ -276,7 +275,8 @@ static void mdev_device_release(struct device *dev)
kfree(mdev);
}
-int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
+int mdev_device_create(struct kobject *kobj,
+ struct device *dev, const guid_t *uuid)
{
int ret;
struct mdev_device *mdev, *tmp;
@@ -291,7 +291,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
/* Check for duplicate */
list_for_each_entry(tmp, &mdev_list, next) {
- if (!uuid_le_cmp(tmp->uuid, uuid)) {
+ if (guid_equal(&tmp->uuid, uuid)) {
mutex_unlock(&mdev_list_lock);
ret = -EEXIST;
goto mdev_fail;
@@ -305,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
goto mdev_fail;
}
- memcpy(&mdev->uuid, &uuid, sizeof(uuid_le));
+ guid_copy(&mdev->uuid, uuid);
list_add(&mdev->next, &mdev_list);
mutex_unlock(&mdev_list_lock);
@@ -315,7 +315,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
mdev->dev.parent = dev;
mdev->dev.bus = &mdev_bus_type;
mdev->dev.release = mdev_device_release;
- dev_set_name(&mdev->dev, "%pUl", uuid.b);
+ dev_set_name(&mdev->dev, "%pUl", uuid);
ret = device_register(&mdev->dev);
if (ret) {
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index b5819b7d7ef7..379758c52b1b 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -28,7 +28,7 @@ struct mdev_parent {
struct mdev_device {
struct device dev;
struct mdev_parent *parent;
- uuid_le uuid;
+ guid_t uuid;
void *driver_data;
struct kref ref;
struct list_head next;
@@ -58,7 +58,8 @@ void parent_remove_sysfs_files(struct mdev_parent *parent);
int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
-int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid);
+int mdev_device_create(struct kobject *kobj,
+ struct device *dev, const guid_t *uuid);
int mdev_device_remove(struct device *dev, bool force_remove);
#endif /* MDEV_PRIVATE_H */
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index ce5dd219f2c8..5193a0e0ce5a 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -55,7 +55,7 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
const char *buf, size_t count)
{
char *str;
- uuid_le uuid;
+ guid_t uuid;
int ret;
if ((count < UUID_STRING_LEN) || (count > UUID_STRING_LEN + 1))
@@ -65,12 +65,12 @@ static ssize_t create_store(struct kobject *kobj, struct device *dev,
if (!str)
return -ENOMEM;
- ret = uuid_le_to_bin(str, &uuid);
+ ret = guid_parse(str, &uuid);
kfree(str);
if (ret)
return ret;
- ret = mdev_device_create(kobj, dev, uuid);
+ ret = mdev_device_create(kobj, dev, &uuid);
if (ret)
return ret;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index ff60bd1ea587..a25659b5a5d1 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -209,6 +209,57 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
return false;
}
+static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 pmcsr;
+
+ if (!pdev->pm_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
+}
+
+/*
+ * pci_set_power_state() wrapper handling devices which perform a soft reset on
+ * D3->D0 transition. Save state prior to a D0/1/2->D3 transition, stash it on
+ * the vdev, and restore it when returning to D0. It is saved separately from
+ * pci_saved_state for use by PM capability emulation, and separately from the
+ * pci_dev internal saved state to avoid it being overwritten and consumed
+ * around other resets.
+ */
+int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ bool needs_restore = false, needs_save = false;
+ int ret;
+
+ if (vdev->needs_pm_restore) {
+ if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
+ pci_save_state(pdev);
+ needs_save = true;
+ }
+
+ if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
+ needs_restore = true;
+ }
+
+ ret = pci_set_power_state(pdev, state);
+
+ if (!ret) {
+ /* D3 might be unsupported via quirk, skip unless in D3 */
+ if (needs_save && pdev->current_state >= PCI_D3hot) {
+ vdev->pm_save = pci_store_saved_state(pdev);
+ } else if (needs_restore) {
+ pci_load_and_free_saved_state(pdev, &vdev->pm_save);
+ pci_restore_state(pdev);
+ }
+ }
+
+ return ret;
+}
+
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
@@ -216,7 +267,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
u16 cmd;
u8 msix_pos;
- pci_set_power_state(pdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D0);
/* Don't allow our initial saved state to include busmaster */
pci_clear_master(pdev);
@@ -407,7 +458,7 @@ out:
vfio_pci_try_bus_reset(vdev);
if (!disable_idle_d3)
- pci_set_power_state(pdev, PCI_D3hot);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
@@ -708,6 +759,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
+ u16 orig_cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -723,15 +775,23 @@ static long vfio_pci_ioctl(void *device_data,
break;
}
- /* Is it really there? */
+ /*
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig_cmd | PCI_COMMAND_MEMORY);
+
io = pci_map_rom(pdev, &size);
- if (!io || !size) {
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
info.size = 0;
- break;
}
- pci_unmap_rom(pdev, io);
- info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -1286,6 +1346,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vfio_pci_set_vga_decode(vdev, false));
}
+ vfio_pci_probe_power_state(vdev);
+
if (!disable_idle_d3) {
/*
* pci-core sets the device power state to an unknown value at
@@ -1296,8 +1358,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* be able to get to D3. Therefore first do a D0 transition
* before going to D3.
*/
- pci_set_power_state(pdev, PCI_D0);
- pci_set_power_state(pdev, PCI_D3hot);
+ vfio_pci_set_power_state(vdev, PCI_D0);
+ vfio_pci_set_power_state(vdev, PCI_D3hot);
}
return ret;
@@ -1316,6 +1378,11 @@ static void vfio_pci_remove(struct pci_dev *pdev)
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
kfree(vdev->region);
mutex_destroy(&vdev->ioeventfds_lock);
+
+ if (!disable_idle_d3)
+ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ kfree(vdev->pm_save);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
@@ -1324,9 +1391,6 @@ static void vfio_pci_remove(struct pci_dev *pdev)
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}
-
- if (!disable_idle_d3)
- pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
@@ -1551,7 +1615,7 @@ put_devs:
tmp->needs_reset = false;
if (tmp != vdev && !disable_idle_d3)
- pci_set_power_state(tmp->pdev, PCI_D3hot);
+ vfio_pci_set_power_state(tmp, PCI_D3hot);
}
vfio_device_put(devs.devices[i]);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 423ea1f98441..e82b51114687 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -691,7 +691,7 @@ static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
break;
}
- pci_set_power_state(vdev->pdev, state);
+ vfio_pci_set_power_state(vdev, state);
}
return count;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8c0009f00818..1812cf22fc4f 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -114,7 +114,9 @@ struct vfio_pci_device {
bool has_vga;
bool needs_reset;
bool nointx;
+ bool needs_pm_restore;
struct pci_saved_state *pci_saved_state;
+ struct pci_saved_state *pm_save;
struct vfio_pci_reflck *reflck;
int refcnt;
int ioeventfds_nr;
@@ -161,6 +163,10 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data);
+
+extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
+ pci_power_t state);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
diff --git a/drivers/vfio/platform/reset/Makefile b/drivers/vfio/platform/reset/Makefile
index 57abd4f0ac5b..7294c5ea122e 100644
--- a/drivers/vfio/platform/reset/Makefile
+++ b/drivers/vfio/platform/reset/Makefile
@@ -2,8 +2,6 @@
vfio-platform-calxedaxgmac-y := vfio_platform_calxedaxgmac.o
vfio-platform-amdxgbe-y := vfio_platform_amdxgbe.o
-ccflags-y += -Idrivers/vfio/platform
-
obj-$(CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET) += vfio-platform-calxedaxgmac.o
obj-$(CONFIG_VFIO_PLATFORM_AMDXGBE_RESET) += vfio-platform-amdxgbe.o
obj-$(CONFIG_VFIO_PLATFORM_BCMFLEXRM_RESET) += vfio_platform_bcmflexrm.o
diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
index bcd419cfd79c..3ddb2704221d 100644
--- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
+++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -25,7 +25,7 @@
#include <uapi/linux/mdio.h>
#include <linux/delay.h>
-#include "vfio_platform_private.h"
+#include "../vfio_platform_private.h"
#define DMA_MR 0x3000
#define MAC_VR 0x0110
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
index d45c3be71198..16165a62b86d 100644
--- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "vfio_platform_private.h"
+#include "../vfio_platform_private.h"
/* FlexRM configuration */
#define RING_REGS_SIZE 0x10000
diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
index 49e5df6e8f29..e0356de5df54 100644
--- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
+++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/io.h>
-#include "vfio_platform_private.h"
+#include "../vfio_platform_private.h"
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "Eric Auger <eric.auger@linaro.org>"
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 64833879f75d..a3030cdf3c18 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -2219,12 +2219,12 @@ static int __init vfio_init(void)
vfio.class->devnode = vfio_devnode;
- ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
+ ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
if (ret)
goto err_alloc_chrdev;
cdev_init(&vfio.group_cdev, &vfio_group_fops);
- ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
+ ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
if (ret)
goto err_cdev_add;
@@ -2236,7 +2236,7 @@ static int __init vfio_init(void)
return 0;
err_cdev_add:
- unregister_chrdev_region(vfio.group_devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
@@ -2254,7 +2254,7 @@ static void __exit vfio_cleanup(void)
#endif
idr_destroy(&vfio.group_idr);
cdev_del(&vfio.group_cdev);
- unregister_chrdev_region(vfio.group_devt, MINORMASK);
+ unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
class_destroy(vfio.class);
vfio.class = NULL;
misc_deregister(&vfio_dev);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c424913324e3..8dbb270998f4 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1235,7 +1235,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
}
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- table_group->ops->unset_window(table_group, i);
+ if (container->tables[i])
+ table_group->ops->unset_window(table_group, i);
table_group->ops->release_ownership(table_group);
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 24a129fcdd61..a2e5dc7716e2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
len, iov, 64, VHOST_ACCESS_WO);
- if (ret)
+ if (ret < 0)
return ret;
for (i = 0; i < ret; i++) {