Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/Kconfig          |   10
-rw-r--r--  drivers/scsi/ufs/Makefile         |    1
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c    |  153
-rw-r--r--  drivers/scsi/ufs/ti-j721e-ufs.c   |   90
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c       |    9
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c   |  209
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.h   |   32
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c       |   94
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h       |    7
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c      |   37
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.h      |    4
-rw-r--r--  drivers/scsi/ufs/ufs.h            |   33
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c        |    7
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h     |    9
-rw-r--r--  drivers/scsi/ufs/ufshcd-dwc.c     |    2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c  |    5
-rw-r--r--  drivers/scsi/ufs/ufshcd.c         | 1162
-rw-r--r--  drivers/scsi/ufs/ufshcd.h         |  101
-rw-r--r--  drivers/scsi/ufs/ufshci.h         |    2
-rw-r--r--  drivers/scsi/ufs/unipro.h         |   11
20 files changed, 1389 insertions(+), 589 deletions(-)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 0b845ab7c3bf..d14c2243e02a 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -132,6 +132,16 @@ config SCSI_UFS_HISI Select this if you have UFS controller on Hisilicon chipset. If unsure, say N. +config SCSI_UFS_TI_J721E + tristate "TI glue layer for Cadence UFS Controller" + depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST) + help + This selects driver for TI glue layer for Cadence UFS Host + Controller IP. + + Selects this if you have TI platform with UFS controller. + If unsure, say N. + config SCSI_UFS_BSG bool "Universal Flash Storage BSG device node" depends on SCSI_UFSHCD diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 2a9097939bcb..94c6c5d7334b 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o +obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c index 86dbb723f3ac..56a6a1ed5ec2 100644 --- a/drivers/scsi/ufs/cdns-pltfrm.c +++ b/drivers/scsi/ufs/cdns-pltfrm.c @@ -19,6 +19,85 @@ #define CDNS_UFS_REG_HCLKDIV 0xFC #define CDNS_UFS_REG_PHY_XCFGD1 0x113C +#define CDNS_UFS_MAX_L4_ATTRS 12 + +struct cdns_ufs_host { + /** + * cdns_ufs_dme_attr_val - for storing L4 attributes + */ + u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS]; +}; + +/** + * cdns_ufs_get_l4_attr - get L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_get_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID), + &host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID), + &host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + &host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID), + &host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS), + &host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + &host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE), + &host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + &host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_l4_attr - set L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_set_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), + host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), + host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID), + host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), + 
host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE), + host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + host->cdns_ufs_dme_attr_val[11]); +} /** * Sets HCLKDIV register value based on the core_clk @@ -62,23 +141,69 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) } /** - * Sets clocks used by the controller + * Called before and after HCE enable bit is set. * @hba: host controller instance - * @on: if true, enable clocks, otherwise disable * @status: notify stage (pre, post change) * * Return zero for success and non-zero for failure */ -static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on, - enum ufs_notify_change_status status) +static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) { - if ((!on) || (status == PRE_CHANGE)) + if (status != PRE_CHANGE) return 0; return cdns_ufs_set_hclkdiv(hba); } /** + * Called around hibern8 enter/exit. + * @hba: host controller instance + * @cmd: UIC Command + * @status: notify stage (pre, post change) + * + */ +static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) + cdns_ufs_get_l4_attr(hba); + if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) + cdns_ufs_set_l4_attr(hba); +} + +/** + * Called before and after Link startup is carried out. + * @hba: host controller instance + * @status: notify stage (pre, post change) + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status != PRE_CHANGE) + return 0; + + /* + * Some UFS devices have issues if LCC is enabled. + * So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed. + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + + /* + * Disabling Autohibern8 feature in cadence UFS + * to mask unexpected interrupt trigger. 
+ */ + hba->ahit = 0; + + return 0; +} + +/** * cdns_ufs_init - performs additional ufs initialization * @hba: host controller instance * @@ -87,6 +212,14 @@ static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on, static int cdns_ufs_init(struct ufs_hba *hba) { int status = 0; + struct cdns_ufs_host *host; + struct device *dev = hba->dev; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + + if (!host) + return -ENOMEM; + ufshcd_set_variant(hba, host); if (hba->vops && hba->vops->phy_initialization) status = hba->vops->phy_initialization(hba); @@ -114,14 +247,19 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba) static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = { .name = "cdns-ufs-pltfm", - .setup_clocks = cdns_ufs_setup_clocks, + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, + .hibern8_notify = cdns_ufs_hibern8_notify, }; static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { .name = "cdns-ufs-pltfm", .init = cdns_ufs_init, - .setup_clocks = cdns_ufs_setup_clocks, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, .phy_initialization = cdns_ufs_m31_16nm_phy_initialization, + .hibern8_notify = cdns_ufs_hibern8_notify, }; static const struct of_device_id cdns_ufs_of_match[] = { @@ -187,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { static struct platform_driver cdns_ufs_pltfrm_driver = { .probe = cdns_ufs_pltfrm_probe, .remove = cdns_ufs_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "cdns-ufshcd", .pm = &cdns_ufs_dev_pm_ops, diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c new file mode 100644 index 000000000000..5216d228cdd9 --- /dev/null +++ b/drivers/scsi/ufs/ti-j721e-ufs.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#define TI_UFS_SS_CTRL 0x4 +#define TI_UFS_SS_RST_N_PCS BIT(0) +#define TI_UFS_SS_CLK_26MHZ BIT(4) + +static int ti_j721e_ufs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long clk_rate; + void __iomem *regbase; + struct clk *clk; + u32 reg = 0; + int ret; + + regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regbase)) + return PTR_ERR(regbase); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + /* Select MPHY refclk frequency */ + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "Cannot claim MPHY clock.\n"); + return PTR_ERR(clk); + } + clk_rate = clk_get_rate(clk); + if (clk_rate == 26000000) + reg |= TI_UFS_SS_CLK_26MHZ; + devm_clk_put(dev, clk); + + /* Take UFS slave device out of reset */ + reg |= TI_UFS_SS_RST_N_PCS; + writel(reg, regbase + TI_UFS_SS_CTRL); + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, + dev); + if (ret) { + dev_err(dev, "failed to populate child nodes %d\n", ret); + pm_runtime_put_sync(dev); + } + + return ret; +} + +static int ti_j721e_ufs_remove(struct platform_device *pdev) +{ + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +static const struct of_device_id 
ti_j721e_ufs_of_match[] = { + { + .compatible = "ti,j721e-ufs", + }, + { }, +}; + +static struct platform_driver ti_j721e_ufs_driver = { + .probe = ti_j721e_ufs_probe, + .remove = ti_j721e_ufs_remove, + .driver = { + .name = "ti-j721e-ufs", + .of_match_table = ti_j721e_ufs_of_match, + }, +}; +module_platform_driver(ti_j721e_ufs_driver); + +MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); +MODULE_DESCRIPTION("TI UFS host controller glue driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index f4d1dca962c4..5d6487350a6c 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -447,17 +447,12 @@ static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) static int ufs_hisi_get_resource(struct ufs_hisi_host *host) { - struct resource *mem_res; struct device *dev = host->hba->dev; struct platform_device *pdev = to_platform_device(dev); /* get resource of ufs sys ctrl */ - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res); - if (IS_ERR(host->ufs_sys_ctrl)) - return PTR_ERR(host->ufs_sys_ctrl); - - return 0; + host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1); + return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl); } static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index 0f6ff33ce52e..53eae5fe2ade 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -6,16 +6,30 @@ * Peter Wang <peter.wang@mediatek.com> */ +#include <linux/arm-smccc.h> +#include <linux/bitfield.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> #include "ufshcd.h" #include "ufshcd-pltfrm.h" +#include "ufs_quirks.h" #include "unipro.h" #include "ufs-mediatek.h" +#define ufs_mtk_smc(cmd, val, res) \ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, \ + cmd, val, 0, 0, 0, 0, 0, &(res)) + +#define ufs_mtk_ref_clk_notify(on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res) + static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) { u32 tmp; @@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba) return err; } +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + unsigned long timeout; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + if (on) { + ufs_mtk_ref_clk_notify(on, res); + ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); + } else { + ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); + } + + /* Wait for ack */ + timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS); + do { + value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); + + /* Wait until ack bit equals to req bit */ + if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) + goto out; + + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); + + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res); + + return -ETIMEDOUT; + +out: + host->ref_clk_enabled = on; + if (!on) + ufs_mtk_ref_clk_notify(on, res); + + return 0; +} + /** * ufs_mtk_setup_clocks - enables/disable clocks * @hba: host controller instance @@ -105,12 +162,16 @@ static int 
ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, switch (status) { case PRE_CHANGE: - if (!on) + if (!on) { + ufs_mtk_setup_ref_clk(hba, on); ret = phy_power_off(host->mphy); + } break; case POST_CHANGE: - if (on) + if (on) { ret = phy_power_on(host->mphy); + ufs_mtk_setup_ref_clk(hba, on); + } break; } @@ -147,6 +208,12 @@ static int ufs_mtk_init(struct ufs_hba *hba) if (err) goto out_variant_clear; + /* Enable runtime autosuspend */ + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + + /* Enable clock-gating */ + hba->caps |= UFSHCD_CAP_CLK_GATING; + /* * ufshcd_vops_init() is invoked after * ufshcd_setup_clock(true) in ufshcd_hba_init() thus @@ -235,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba) return ret; } +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + unsigned long flags; + u32 ah_ms; + + if (ufshcd_is_clkgating_allowed(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) + ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, + hba->ahit); + else + ah_ms = 10; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = ah_ms + 5; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } +} + static int ufs_mtk_post_link(struct ufs_hba *hba) { /* disable device LCC */ @@ -243,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba) /* enable unipro clock gating feature */ ufs_mtk_cfg_unipro_cg(hba, true); + /* configure auto-hibern8 timer to 10ms */ + if (ufshcd_is_auto_hibern8_supported(hba)) { + ufshcd_auto_hibern8_update(hba, + FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3)); + } + + ufs_mtk_setup_clk_gating(hba); + return 0; } @@ -266,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, return ret; } +static void ufs_mtk_device_reset(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_device_reset_ctrl(0, res); + + /* + * The reset signal is active low. UFS devices shall detect + * more than or equal to 1us of positive or negative RST_n + * pulse width. + * + * To be on safe side, keep the reset low for at least 10us. 
+ */ + usleep_range(10, 15); + + ufs_mtk_device_reset_ctrl(1, res); + + /* Some devices may need time to respond to rst_n */ + usleep_range(10000, 15000); + + dev_info(hba->dev, "device reset done\n"); +} + +static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_hba_enable(hba); + if (err) + return err; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 0); + if (err) + return err; + + err = ufshcd_uic_hibern8_exit(hba); + if (!err) + ufshcd_set_link_active(hba); + else + return err; + + err = ufshcd_make_hba_operational(hba); + if (err) + return err; + + return 0; +} + +static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 1); + if (err) { + /* Resume UniPro state for following error recovery */ + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 0); + return err; + } + + return 0; +} + static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int err; struct ufs_mtk_host *host = ufshcd_get_variant(hba); - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + err = ufs_mtk_link_set_lpm(hba); + if (err) + return -EAGAIN; phy_power_off(host->mphy); + ufs_mtk_setup_ref_clk(hba, false); + } return 0; } @@ -279,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); + int err; - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + ufs_mtk_setup_ref_clk(hba, true); phy_power_on(host->mphy); + err = ufs_mtk_link_set_hpm(hba); + if (err) + return err; + } + + return 0; +} + +static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl "); + + ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); + + ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, + REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4, + "MPHY Ctrl "); + + /* Direct debugging information to REG_MTK_PROBE */ + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); + ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); +} + +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); return 0; } @@ -298,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .setup_clocks = ufs_mtk_setup_clocks, .link_startup_notify = ufs_mtk_link_startup_notify, .pwr_change_notify = ufs_mtk_pwr_change_notify, + .apply_dev_quirks = ufs_mtk_apply_dev_quirks, .suspend = ufs_mtk_suspend, .resume = ufs_mtk_resume, + .dbg_register_dump = ufs_mtk_dbg_register_dump, + .device_reset = ufs_mtk_device_reset, }; /** diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h index 19f8c42fe06f..fccdd979d6fb 100644 --- a/drivers/scsi/ufs/ufs-mediatek.h +++ b/drivers/scsi/ufs/ufs-mediatek.h @@ -6,6 +6,30 @@ #ifndef _UFS_MEDIATEK_H #define _UFS_MEDIATEK_H +#include <linux/bitops.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> + +/* + * Vendor specific UFSHCI Registers + */ +#define REG_UFS_REFCLK_CTRL 0x144 +#define REG_UFS_EXTREG 0x2100 +#define REG_UFS_MPHYCTRL 0x2200 +#define REG_UFS_REJECT_MON 0x22AC +#define REG_UFS_DEBUG_SEL 0x22C0 +#define REG_UFS_PROBE 0x22C8 + +/* + * Ref-clk control + * + * Values for register REG_UFS_REFCLK_CTRL + */ +#define 
REFCLK_RELEASE 0x0 +#define REFCLK_REQUEST BIT(0) +#define REFCLK_ACK BIT(1) + +#define REFCLK_REQ_TIMEOUT_MS 3 + /* * Vendor specific pre-defined parameters */ @@ -30,6 +54,13 @@ #define VS_UNIPROPOWERDOWNCONTROL 0xD0A8 /* + * SiP commands + */ +#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) + +/* * VS_DEBUGCLOCKENABLE */ enum { @@ -48,6 +79,7 @@ enum { struct ufs_mtk_host { struct ufs_hba *hba; struct phy *mphy; + bool ref_clk_enabled; }; #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index ee4b1da1e223..c69c29a1ceb9 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -8,6 +8,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> +#include <linux/gpio/consumer.h> #include <linux/reset-controller.h> #include "ufshcd.h" @@ -245,6 +246,44 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) mb(); } +/** + * ufs_qcom_host_reset - reset host controller and PHY + */ +static int ufs_qcom_host_reset(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!host->core_reset) { + dev_warn(hba->dev, "%s: reset control not set\n", __func__); + goto out; + } + + ret = reset_control_assert(host->core_reset); + if (ret) { + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", + __func__, ret); + goto out; + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. + */ + usleep_range(200, 210); + + ret = reset_control_deassert(host->core_reset); + if (ret) + dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", + __func__, ret); + + usleep_range(1000, 1100); + +out: + return ret; +} + static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -253,6 +292,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) ? true : false; + /* Reset UFS Host Controller and PHY */ + ret = ufs_qcom_host_reset(hba); + if (ret) + dev_warn(hba->dev, "%s: host reset returned %d\n", + __func__, ret); + if (is_rate_B) phy_set_mode(phy, PHY_MODE_UFS_HS_B); @@ -800,7 +845,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { - u32 val; struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_dev_params ufs_qcom_cap; int ret = 0; @@ -869,8 +913,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, ret = -EINVAL; } - val = ~(MAX_U32 << dev_req_params->lane_tx); - /* cache the power mode parameters to use internally */ memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); @@ -1103,6 +1145,15 @@ static int ufs_qcom_init(struct ufs_hba *hba) host->hba = hba; ufshcd_set_variant(hba, host); + /* Setup the reset control of HCI */ + host->core_reset = devm_reset_control_get(hba->dev, "rst"); + if (IS_ERR(host->core_reset)) { + err = PTR_ERR(host->core_reset); + dev_warn(dev, "Failed to get reset control %d\n", err); + host->core_reset = NULL; + err = 0; + } + /* Fire up the reset controller. Failure here is non-fatal. 
*/ host->rcdev.of_node = dev->of_node; host->rcdev.ops = &ufs_qcom_reset_ops; @@ -1140,6 +1191,15 @@ static int ufs_qcom_init(struct ufs_hba *hba) } } + host->device_reset = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(host->device_reset)) { + err = PTR_ERR(host->device_reset); + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to acquire reset gpio: %d\n", err); + goto out_variant_clear; + } + err = ufs_qcom_bus_register(host); if (err) goto out_variant_clear; @@ -1546,12 +1606,37 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) } /** + * ufs_qcom_device_reset() - toggle the (optional) device reset line + * @hba: per-adapter instance + * + * Toggles the (optional) reset line to reset the attached device. + */ +static void ufs_qcom_device_reset(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + /* reset gpio is optional */ + if (!host->device_reset) + return; + + /* + * The UFS device shall detect reset pulses of 1us, sleep for 10us to + * be on the safe side. + */ + gpiod_set_value_cansleep(host->device_reset, 1); + usleep_range(10, 15); + + gpiod_set_value_cansleep(host->device_reset, 0); + usleep_range(10, 15); +} + +/** * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * * The variant operations configure the necessary controller and PHY * handshake during initialization. */ -static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { +static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .name = "qcom", .init = ufs_qcom_init, .exit = ufs_qcom_exit, @@ -1565,6 +1650,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, + .device_reset = ufs_qcom_device_reset, }; /** diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 001915d1e0e4..2d95e7cc7187 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -6,6 +6,7 @@ #define UFS_QCOM_H_ #include <linux/reset-controller.h> +#include <linux/reset.h> #define MAX_UFS_QCOM_HOSTS 1 #define MAX_U32 (~(u32)0) @@ -195,6 +196,8 @@ struct ufs_qcom_testbus { u8 select_minor; }; +struct gpio_desc; + struct ufs_qcom_host { /* * Set this capability if host controller supports the QUniPro mode @@ -231,7 +234,11 @@ struct ufs_qcom_host { u32 dbg_print_en; struct ufs_qcom_testbus testbus; + /* Reset control of HCI */ + struct reset_control *core_reset; struct reset_controller_dev rcdev; + + struct gpio_desc *device_reset; }; static inline u32 diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index f478685122ff..dbdf8b01abed 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -118,23 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev, ufs_pm_lvl_states[hba->spm_lvl].link_state)); } -static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) -{ - unsigned long flags; - - if (!ufshcd_is_auto_hibern8_supported(hba)) - return; - - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ahit == ahit) - goto out_unlock; - hba->ahit = ahit; - if (!pm_runtime_suspended(hba->dev)) - ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); -out_unlock: - spin_unlock_irqrestore(hba->host->host_lock, flags); -} - /* Convert Auto-Hibernate Idle Timer register value to microseconds */ static int ufshcd_ahit_to_us(u32 ahit) { @@ -571,9 +554,10 @@ static ssize_t _name##_show(struct device *dev, \ int ret; \ int desc_len = QUERY_DESC_MAX_SIZE; \ u8 
*desc_buf; \ + \ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ - if (!desc_buf) \ - return -ENOMEM; \ + if (!desc_buf) \ + return -ENOMEM; \ ret = ufshcd_query_descriptor_retry(hba, \ UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ 0, 0, desc_buf, &desc_len); \ @@ -582,14 +566,13 @@ static ssize_t _name##_show(struct device *dev, \ goto out; \ } \ index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ - memset(desc_buf, 0, QUERY_DESC_MAX_SIZE); \ - if (ufshcd_read_string_desc(hba, index, desc_buf, \ - QUERY_DESC_MAX_SIZE, true)) { \ - ret = -EINVAL; \ + kfree(desc_buf); \ + desc_buf = NULL; \ + ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ + SD_ASCII_STD); \ + if (ret < 0) \ goto out; \ - } \ - ret = snprintf(buf, PAGE_SIZE, "%s\n", \ - desc_buf + QUERY_DESC_HDR_SIZE); \ + ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \ out: \ kfree(desc_buf); \ return ret; \ @@ -730,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \ struct scsi_device *sdev = to_scsi_device(dev); \ struct ufs_hba *hba = shost_priv(sdev->host); \ u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \ - if (!ufs_is_valid_unit_desc_lun(lun)) \ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \ return -EINVAL; \ return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ lun, _duname##_DESC_PARAM##_puname, buf, _size); \ diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h index e5621e59a432..0f4e750a6748 100644 --- a/drivers/scsi/ufs/ufs-sysfs.h +++ b/drivers/scsi/ufs/ufs-sysfs.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (C) 2018 Western Digital Corporation +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Western Digital Corporation */ #ifndef __UFS_SYSFS_H__ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 99a9c4d16f6b..cfe380348bf0 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -63,7 +63,6 @@ #define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F #define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) #define UFS_UPIU_WLUN_ID (1 << 7) -#define UFS_UPIU_MAX_GENERAL_LUN 8 /* Well known logical unit id in LUN field of UPIU */ enum { @@ -499,9 +498,9 @@ struct ufs_query_res { #define UFS_VREG_VCC_MAX_UV 3600000 /* uV */ #define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */ #define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */ -#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */ -#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */ -#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ +#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */ +#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */ +#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */ #define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ /* @@ -530,28 +529,28 @@ struct ufs_dev_info { bool f_power_on_wp_en; /* Keeps information if any of the LU is power on write protected */ bool is_lu_power_on_wp; -}; - -#define MAX_MODEL_LEN 16 -/** - * ufs_dev_desc - ufs device details from the device descriptor - * - * @wmanufacturerid: card details - * @model: card model - */ -struct ufs_dev_desc { + /* Maximum number of general LU supported by the UFS device */ + u8 max_lu_supported; u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; + /*UFS device Product Name */ + u8 *model; }; /** * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor + * @dev_info: pointer of instance of struct ufs_dev_info * @lun: LU number to check * @return: true if the lun has a matching unit descriptor, false otherwise */ -static inline bool ufs_is_valid_unit_desc_lun(u8 lun) +static inline bool 
ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, + u8 lun) { - return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN); + if (!dev_info || !dev_info->max_lu_supported) { + pr_err("Max General LU supported by UFS isn't initialized\n"); + return false; + } + + return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported); } #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index a9344eb4e047..53dd87628cbe 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -98,6 +98,8 @@ static int ufs_bsg_request(struct bsg_job *job) bsg_reply->reply_payload_rcv_len = 0; + pm_runtime_get_sync(hba->dev); + msgcode = bsg_request->msgcode; switch (msgcode) { case UPIU_TRANSACTION_QUERY_REQ: @@ -135,6 +137,8 @@ static int ufs_bsg_request(struct bsg_job *job) break; } + pm_runtime_put_sync(hba->dev); + if (!desc_buff) goto out; @@ -158,6 +162,7 @@ out: /** * ufs_bsg_remove - detach and remove the added ufs-bsg node + * @hba: per adapter object * * Should be called when unloading the driver. */ @@ -198,7 +203,7 @@ int ufs_bsg_probe(struct ufs_hba *hba) bsg_dev->parent = get_device(parent); bsg_dev->release = ufs_bsg_node_release; - dev_set_name(bsg_dev, "ufs-bsg"); + dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no); ret = device_add(bsg_dev); if (ret) diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index fe6cad9b2a0d..d0ab147f98d3 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -22,16 +22,17 @@ * @quirk: device quirk */ struct ufs_dev_fix { - struct ufs_dev_desc card; + u16 wmanufacturerid; + u8 *model; unsigned int quirk; }; -#define END_FIX { { 0 }, 0 } +#define END_FIX { } /* add specific device quirk */ #define UFS_FIX(_vendor, _model, _quirk) { \ - .card.wmanufacturerid = (_vendor),\ - .card.model = (_model), \ + .wmanufacturerid = (_vendor),\ + .model = (_model), \ .quirk = (_quirk), \ } diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c index fb9e2ff4f8d2..6a901da2d15a 100644 --- a/drivers/scsi/ufs/ufshcd-dwc.c +++ b/drivers/scsi/ufs/ufshcd-dwc.c @@ -80,7 +80,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba) */ static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) { - const struct ufshcd_dme_attr_val setup_attrs[] = { + static const struct ufshcd_dme_attr_val setup_attrs[] = { { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL }, { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL }, { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL }, diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index d7d521b394c3..76f9be71c31b 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -391,12 +391,10 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, { struct ufs_hba *hba; void __iomem *mmio_base; - struct resource *mem_res; int irq, err; struct device *dev = &pdev->dev; - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mmio_base = devm_ioremap_resource(dev, mem_res); + mmio_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mmio_base)) { err = PTR_ERR(mmio_base); goto out; @@ -404,7 +402,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "IRQ resource not available\n"); err = -ENODEV; goto out; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e274053109d0..abd0e6b05f79 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -88,6 +88,9 @@ 
/* Interrupt aggregation default timeout, unit: 40us */ #define INT_AGGR_DEF_TO 0x02 +/* default delay of autosuspend: 2000 ms */ +#define RPM_AUTOSUSPEND_DELAY_MS 2000 + #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ int _ret; \ @@ -114,7 +117,7 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ return -EINVAL; - regs = kzalloc(len, GFP_KERNEL); + regs = kzalloc(len, GFP_ATOMIC); if (!regs) return -ENOMEM; @@ -237,17 +240,16 @@ static struct ufs_dev_fix ufs_fixups[] = { END_FIX }; -static void ufshcd_tmc_handler(struct ufs_hba *hba); +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); -static int ufshcd_probe_hba(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); @@ -263,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) return tag >= 0 && tag < hba->nutrs; } -static inline int ufshcd_enable_irq(struct ufs_hba *hba) +static inline void ufshcd_enable_irq(struct ufs_hba *hba) { - int ret = 0; - if (!hba->is_irq_enabled) { - ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, - hba); - if (ret) - dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", - __func__, ret); + enable_irq(hba->irq); hba->is_irq_enabled = true; } - - return ret; } static inline void ufshcd_disable_irq(struct ufs_hba *hba) { if (hba->is_irq_enabled) { - free_irq(hba->irq, hba); + disable_irq(hba->irq); hba->is_irq_enabled = false; } } @@ -299,16 +293,6 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba) scsi_block_requests(hba->host); } -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(char *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -342,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, u8 opcode = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = -1; if (!trace_ufshcd_command_enabled()) { /* trace UPIU W/O tracing command */ - if (lrbp->cmd) + if (cmd) ufshcd_add_cmd_upiu_trace(hba, tag, str); return; } - if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( 
lrbp->ucd_req_ptr->sc.exp_data_transfer_len); } @@ -390,24 +374,25 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba) } } -static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, - struct ufs_uic_err_reg_hist *err_hist, char *err_name) +static void ufshcd_print_err_hist(struct ufs_hba *hba, + struct ufs_err_reg_hist *err_hist, + char *err_name) { int i; bool found = false; - for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) { - int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH; + for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) { + int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH; - if (err_hist->reg[p] == 0) + if (err_hist->tstamp[p] == 0) continue; - dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i, + dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); found = true; } if (!found) - dev_err(hba->dev, "No record of %s uic errors\n", err_name); + dev_err(hba->dev, "No record of %s\n", err_name); } static void ufshcd_print_host_regs(struct ufs_hba *hba) @@ -423,16 +408,26 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba) ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), hba->ufs_stats.hibern8_exit_cnt); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err, + "auto_hibern8_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err, + "link_startup_fail"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err, + "suspend_fail"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort"); ufshcd_print_clk_freqs(hba); - if (hba->vops && hba->vops->dbg_register_dump) - hba->vops->dbg_register_dump(hba); + ufshcd_vops_dbg_register_dump(hba); } static @@ -492,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) static void ufshcd_print_host_state(struct ufs_hba *hba) { dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", - hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", + hba->outstanding_reqs, hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -641,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) } /** - * ufshcd_get_tm_free_slot - get a free slot for task management request - * @hba: per adapter instance - * @free_slot: pointer to variable with available slot value - * - * Get a free tag 
and lock it until ufshcd_put_tm_slot() is called. - * Returns 0 if free slot is not available, else return 1 with tag value - * in @free_slot. - */ -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) -{ - int tag; - bool ret = false; - - if (!free_slot) - goto out; - - do { - tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); - if (tag >= hba->nutmrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); - - *free_slot = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) -{ - clear_bit_unlock(slot, &hba->tm_slots_in_use); -} - -/** * ufshcd_utrl_clear - Clear a bit in UTRLCLR register * @hba: per adapter instance * @pos: position of the bit to be cleared @@ -1268,6 +1229,24 @@ out: return ret; } +static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved) +{ + int *busy = priv; + + WARN_ON_ONCE(reserved); + (*busy)++; + return false; +} + +/* Whether or not any tag is in use by a request that is in progress. */ +static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) +{ + struct request_queue *q = hba->cmd_queue; + int busy = 0; + + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy); + return busy; +} static int ufshcd_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) @@ -1485,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_setup_clocks(hba, true); + ufshcd_enable_irq(hba); + /* Exit from hibern8 */ if (ufshcd_can_hibern8_during_gating(hba)) { /* Prevent gating in this path */ @@ -1605,7 +1586,7 @@ static void ufshcd_gate_work(struct work_struct *work) * state to CLKS_ON. */ if (hba->clk_gating.is_suspended || - (hba->clk_gating.state == REQ_CLKS_ON)) { + (hba->clk_gating.state != REQ_CLKS_OFF)) { hba->clk_gating.state = CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); @@ -1614,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work) if (hba->clk_gating.active_reqs || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done) goto rel_lock; @@ -1631,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work) ufshcd_set_link_hibern8(hba); } + ufshcd_disable_irq(hba); + if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); else @@ -1668,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba) if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done || ufshcd_eh_in_progress(hba)) return; @@ -1876,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_add_command_trace(hba, task_tag, "send"); } /** @@ -1933,8 +1916,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) memcpy(hba->dev_cmd.query.descriptor, descp, 
resp_len); } else { dev_warn(hba->dev, - "%s: Response size is bigger than buffer", - __func__); + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, buf_len); return -EINVAL; } } @@ -2234,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, static void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) { + struct scsi_cmnd *cmd = lrbp->cmd; struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; unsigned short cdb_len; @@ -2247,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) /* Total EHS length and Data segment length will be zero */ ucd_req_ptr->header.dword_2 = 0; - ucd_req_ptr->sc.exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->sdb.length); + ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); - cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE); + cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); - memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); + memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } @@ -2438,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba->req_abort_count = 0; - /* acquire the tag to make sure device cmds don't use it */ - if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { - /* - * Dev manage command in progress, requeue the command. - * Requeuing the command helps in cases where the request *may* - * find different tag instead of waiting for dev manage command - * completion. - */ - err = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - err = ufshcd_hold(hba, true); if (err) { err = SCSI_MLQUEUE_HOST_BUSY; - clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } WARN_ON(hba->clk_gating.state != CLKS_ON); @@ -2474,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(hba, lrbp); if (err) { lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); + ufshcd_release(hba); goto out; } /* Make sure descriptors are ready before ringing the doorbell */ @@ -2622,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, } /** - * ufshcd_get_dev_cmd_tag - Get device management command tag - * @hba: per-adapter instance - * @tag_out: pointer to variable with available slot value - * - * Get a free slot and lock it until device management command - * completes. - * - * Returns false if free slot is unavailable for locking, else - * return true with tag value in @tag. - */ -static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) -{ - int tag; - bool ret = false; - unsigned long tmp; - - if (!tag_out) - goto out; - - do { - tmp = ~hba->lrb_in_use; - tag = find_last_bit(&tmp, hba->nutrs); - if (tag >= hba->nutrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); - - *tag_out = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) -{ - clear_bit_unlock(tag, &hba->lrb_in_use); -} - -/** * ufshcd_exec_dev_cmd - API for sending device management requests * @hba: UFS hba * @cmd_type: specifies the type (NOP, Query...) 
@@ -2671,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err; int tag; @@ -2684,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by SCSI request timeout. */ - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -2709,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, err ? "query_complete_err" : "query_complete"); out_put_tag: - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -2913,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba, int ret = 0; u32 retries; - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { ret = ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val); if (ret) @@ -2984,10 +2924,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out_unlock; } - hba->dev_cmd.query.descriptor = NULL; *buf_len = be16_to_cpu(response->upiu_res.length); out_unlock: + hba->dev_cmd.query.descriptor = NULL; mutex_unlock(&hba->dev_cmd.lock); out: ufshcd_release(hba); @@ -3199,67 +3139,85 @@ out: static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u8 *buf, + void *buf, u32 size) { return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); } -static inline int ufshcd_read_power_desc(struct ufs_hba *hba, - u8 *buf, - u32 size) -{ - return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); -} -static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +/** + * struct uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[0]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char ufshcd_remove_non_printable(u8 ch) { - return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; } /** * ufshcd_read_string_desc - read string descriptor * @hba: pointer to adapter instance * @desc_index: descriptor index - * @buf: pointer to buffer where descriptor would be read - * @size: size of buf + * @buf: pointer to buffer where descriptor would be read, + * the caller should free the memory. * @ascii: if true convert from unicode to ascii characters + * null terminated string. * - * Return 0 in case of success, non-zero otherwise + * Return: + * * string size on success. 
+ * * -ENOMEM: on allocation failure + * * -EINVAL: on a wrong parameter */ -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii) { - int err = 0; + struct uc_string_id *uc_str; + u8 *str; + int ret; - err = ufshcd_read_desc(hba, - QUERY_DESC_IDN_STRING, desc_index, buf, size); + if (!buf) + return -EINVAL; - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", - __func__, QUERY_REQ_RETRIES, err); + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, + desc_index, uc_str, + QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; goto out; } if (ascii) { - int desc_len; - int ascii_len; + ssize_t ascii_len; int i; - char *buff_ascii; - - desc_len = buf[0]; /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; goto out; } @@ -3267,22 +3225,28 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, * the descriptor contains string in UTF16 format * we need to convert to utf-8 so it can be displayed */ - utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], - desc_len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); + for (i = 0; i < ret; i++) + str[i] = ufshcd_remove_non_printable(str[i]); + + str[ret++] = '\0'; - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); + } else { + str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + ret = uc_str->len; } out: - return err; + *buf = str; + kfree(uc_str); + return ret; } /** @@ -3305,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, * Unit descriptors are only available for general purpose LUs (LUN id * from 0 to 7) and RPMB Well known LU. 
*/ - if (!ufs_is_valid_unit_desc_lun(lun)) + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) return -EOPNOTSUPP; return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, @@ -3819,6 +3783,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba) ufshcd_set_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Reset the attached device */ + ufshcd_vops_device_reset(hba); + ret = ufshcd_host_reset_and_restore(hba); spin_lock_irqsave(hba->host->host_lock, flags); @@ -3848,15 +3815,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) ktime_to_us(ktime_sub(ktime_get(), start)), ret); if (ret) { + int err; + dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", __func__, ret); /* - * If link recovery fails then return error so that caller - * don't retry the hibern8 enter again. + * If link recovery fails then return error code returned from + * ufshcd_link_recovery(). + * If link recovery succeeds then return -EAGAIN to attempt + * hibern8 enter retry again. */ - if (ufshcd_link_recovery(hba)) - ret = -ENOLINK; + err = ufshcd_link_recovery(hba); + if (err) { + dev_err(hba->dev, "%s: link recovery failed", __func__); + ret = err; + } else { + ret = -EAGAIN; + } } else ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, POST_CHANGE); @@ -3870,14 +3846,14 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { ret = __ufshcd_uic_hibern8_enter(hba); - if (!ret || ret == -ENOLINK) + if (!ret) goto out; } out: return ret; } -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) { struct uic_command uic_cmd = {0}; int ret; @@ -3903,8 +3879,27 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) return ret; } +EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); -static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) +{ + unsigned long flags; + + if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ahit == ahit) + goto out_unlock; + hba->ahit = ahit; + if (!pm_runtime_suspended(hba->dev)) + ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); +out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +} +EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); + +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) { unsigned long flags; @@ -4043,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), pwr_mode->hs_rate); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), + DL_AFC0ReqTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), + DL_FC1ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), + DL_TC1ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), + DL_AFC1ReqTimeOutVal_Default); + + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), + DL_AFC0ReqTimeOutVal_Default); + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 
| pwr_mode->pwr_tx); @@ -4136,7 +4151,7 @@ out: * * Returns 0 on success, non-zero value on failure */ -static int ufshcd_make_hba_operational(struct ufs_hba *hba) +int ufshcd_make_hba_operational(struct ufs_hba *hba) { int err = 0; u32 reg; @@ -4182,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) out: return err; } +EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); /** * ufshcd_hba_stop - Send controller to reset state @@ -4214,12 +4230,6 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { int retry; - /* - * msleep of 1 and 5 used in this function might result in msleep(20), - * but it was necessary to send the UFS FPGA to reset mode during - * development and testing of this driver. msleep can be changed to - * mdelay and retry count can be reduced based on the controller. - */ if (!ufshcd_is_hba_active(hba)) /* change controller state to "reset state" */ ufshcd_hba_stop(hba, true); @@ -4242,7 +4252,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) * instruction might be read back. * This delay can be changed based on the controller. */ - msleep(1); + usleep_range(1000, 1100); /* wait for the host controller to complete initialization */ retry = 10; @@ -4254,7 +4264,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) "Controller enable failed\n"); return -EIO; } - msleep(5); + usleep_range(5000, 5100); } /* enable UIC related interrupts */ @@ -4265,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) return 0; } -static int ufshcd_hba_enable(struct ufs_hba *hba) +int ufshcd_hba_enable(struct ufs_hba *hba) { int ret; @@ -4290,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) return ret; } +EXPORT_SYMBOL_GPL(ufshcd_hba_enable); + static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) { int tx_lanes, i, err = 0; @@ -4326,6 +4338,15 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) return ufshcd_disable_tx_lcc(hba, true); } +void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, + u32 reg) +{ + reg_hist->reg[reg_hist->pos] = reg; + reg_hist->tstamp[reg_hist->pos] = ktime_get(); + reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH; +} +EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist); + /** * ufshcd_link_startup - Initialize unipro link startup * @hba: per adapter instance @@ -4353,6 +4374,8 @@ link_startup: /* check if device is detected by inter-connect layer */ if (!ret && !ufshcd_is_device_present(hba)) { + ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, + 0); dev_err(hba->dev, "%s: Device not present\n", __func__); ret = -ENXIO; goto out; @@ -4363,13 +4386,19 @@ link_startup: * but we can't be sure if the link is up until link startup * succeeds. So reset the local Uni-Pro and try again. */ - if (ret && ufshcd_hba_enable(hba)) + if (ret && ufshcd_hba_enable(hba)) { + ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, + (u32)ret); goto out; + } } while (ret && retries--); - if (ret) + if (ret) { /* failed to get the link up... retire */ + ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, + (u32)ret); goto out; + } if (link_startup_again) { link_startup_again = false; @@ -4499,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba, * protected so skip reading bLUWriteProtect parameter for * it. For other W-LUs, UNIT DESCRIPTOR is not available. 
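[Editorial sketch] The new ufshcd_update_reg_hist() helper above generalizes the old UIC-only history updater so that fatal errors, resets, and aborts can be recorded too. The pattern is a fixed-length cyclic buffer: store the value at the current position, stamp it, and advance modulo the length. A minimal self-contained sketch of the same pattern, using a plain nanosecond counter in place of ktime_t:

    #include <stdint.h>
    #include <time.h>

    #define HIST_LENGTH 8

    struct err_reg_hist {
            int pos;                      /* next slot to overwrite */
            uint32_t reg[HIST_LENGTH];    /* saved register values */
            uint64_t tstamp[HIST_LENGTH]; /* nanosecond timestamps */
    };

    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    static void update_reg_hist(struct err_reg_hist *h, uint32_t reg)
    {
            h->reg[h->pos] = reg;
            h->tstamp[h->pos] = now_ns();
            h->pos = (h->pos + 1) % HIST_LENGTH; /* wrap after 8 entries */
    }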
*/ - else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + else if (lun >= hba->dev_info.max_lu_supported) ret = -ENOTSUPP; else ret = ufshcd_read_unit_desc_param(hba, @@ -4546,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ sdev->use_10_for_ms = 1; + /* DBD field should be set to 1 in mode sense(10) */ + sdev->set_dbd_for_ms = 1; + /* allow SCSI layer to restart the device in case of errors */ sdev->allow_restart = 1; @@ -4584,9 +4616,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) */ static int ufshcd_slave_configure(struct scsi_device *sdev) { + struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + + if (ufshcd_is_rpm_autosuspend_allowed(hba)) + sdev->rpm_autosuspend = 1; + return 0; } @@ -4732,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } /* end of switch */ - if (host_byte(result) != DID_OK) + if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); return result; } @@ -4741,19 +4778,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * ufshcd_uic_cmd_compl - handle completion of uic command * @hba: per adapter instance * @intr_status: interrupt status generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { + irqreturn_t retval = IRQ_NONE; + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { hba->active_uic_cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); hba->active_uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); complete(&hba->active_uic_cmd->done); + retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { complete(hba->uic_async_done); + retval = IRQ_HANDLED; + } + return retval; } /** @@ -4779,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, cmd->result = result; /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; - clear_bit_unlock(index, &hba->lrb_in_use); + lrbp->compl_time_stamp = ktime_get(); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); __ufshcd_release(hba); } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { + lrbp->compl_time_stamp = ktime_get(); if (hba->dev_cmd.complete) { ufshcd_add_command_trace(hba, index, "dev_complete"); @@ -4793,24 +4841,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } if (ufshcd_is_clkscaling_supported(hba)) hba->clk_scaling.active_reqs--; - - lrbp->compl_time_stamp = ktime_get(); } /* clear corresponding bits of completed commands */ hba->outstanding_reqs ^= completed_reqs; ufshcd_clk_scaling_update_busy(hba); - - /* we might have free'd some tags above */ - wake_up(&hba->dev_cmd.tag_wq); } /** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) { unsigned long completed_reqs; u32 
tr_doorbell; @@ -4829,7 +4876,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - __ufshcd_transfer_req_compl(hba, completed_reqs); + if (completed_reqs) { + __ufshcd_transfer_req_compl(hba, completed_reqs); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } } /** @@ -4967,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) hba->auto_bkops_enabled = false; trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); + hba->is_urgent_bkops_lvl_checked = false; out: return err; } @@ -4991,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; ufshcd_disable_auto_bkops(hba); } + hba->is_urgent_bkops_lvl_checked = false; } static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) @@ -5037,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); + hba->urgent_bkops_lvl = curr_status; out: return err; } @@ -5114,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eeh_work); pm_runtime_get_sync(hba->dev); - scsi_block_requests(hba->host); + ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", @@ -5128,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_bkops_exception_event_handler(hba); out: - scsi_unblock_requests(hba->host); + ufshcd_scsi_unblock_requests(hba); pm_runtime_put_sync(hba->dev); return; } @@ -5262,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work) /* * if host reset is required then skip clearing the pending - * transfers forcefully because they will automatically get - * cleared after link startup. + * transfers forcefully because they will get cleared during + * host reset and restore */ if (needs_reset) goto skip_pending_xfer_clear; @@ -5345,72 +5400,80 @@ out: pm_runtime_put_sync(hba->dev); } -static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist, - u32 reg) -{ - reg_hist->reg[reg_hist->pos] = reg; - reg_hist->tstamp[reg_hist->pos] = ktime_get(); - reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH; -} - /** * ufshcd_update_uic_error - check and set fatal UIC error flags. * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_update_uic_error(struct ufs_hba *hba) +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) { u32 reg; + irqreturn_t retval = IRQ_NONE; /* PHY layer lane error */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); /* Ignore LINERESET indication, as this is not an error */ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && - (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { + (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { /* * To know whether this error is fatal or not, DB timeout * must be checked but this error is handled separately. 
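[Editorial sketch] The reworked completion path above returns IRQ_HANDLED only when the doorbell actually retired something. Completed commands are derived by XOR-ing the doorbell register with the driver's outstanding-request bitmap: any bit set in outstanding_reqs but already cleared in the doorbell belongs to a finished command. A small sketch of that derivation, assuming a hypothetical 32-slot controller:

    #include <stdint.h>
    #include <stdio.h>

    /* Bits set in 'outstanding' but already cleared in the doorbell
     * register identify commands the controller has retired.
     */
    static uint32_t completed_mask(uint32_t tr_doorbell, uint32_t outstanding)
    {
            return tr_doorbell ^ outstanding;
    }

    int main(void)
    {
            uint32_t outstanding = 0x0000000f; /* tags 0-3 issued */
            uint32_t doorbell = 0x00000005;    /* tags 0 and 2 still running */
            uint32_t done = completed_mask(doorbell, outstanding);

            for (int tag = 0; tag < 32; tag++)
                    if (done & (1u << tag))
                            printf("tag %d completed\n", tag); /* 1 and 3 */
            return 0;
    }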
*/ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); - ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg); + ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg); + retval |= IRQ_HANDLED; } /* PA_INIT_ERROR is fatal and needs UIC reset */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); - if (reg) - ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg); - - if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) - hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; - else if (hba->dev_quirks & - UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { - if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) - hba->uic_error |= - UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; - else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) - hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + if ((reg & UIC_DATA_LINK_LAYER_ERROR) && + (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg); + + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) + hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } + retval |= IRQ_HANDLED; } /* UIC NL/TL/DME errors needs software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); - if (reg) { - ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg); + if ((reg & UIC_NETWORK_LAYER_ERROR) && + (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg); hba->uic_error |= UFSHCD_UIC_NL_ERROR; + retval |= IRQ_HANDLED; } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); - if (reg) { - ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg); + if ((reg & UIC_TRANSPORT_LAYER_ERROR) && + (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg); hba->uic_error |= UFSHCD_UIC_TL_ERROR; + retval |= IRQ_HANDLED; } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); - if (reg) { - ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg); + if ((reg & UIC_DME_ERROR) && + (reg & UIC_DME_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg); hba->uic_error |= UFSHCD_UIC_DME_ERROR; + retval |= IRQ_HANDLED; } dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", __func__, hba->uic_error); + return retval; } static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, @@ -5433,17 +5496,24 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, /** * ufshcd_check_errors - Check for errors that need s/w attention * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_check_errors(struct ufs_hba *hba) +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) { bool queue_eh_work = false; + irqreturn_t retval = IRQ_NONE; - if (hba->errors & INT_FATAL_ERRORS) + if (hba->errors & INT_FATAL_ERRORS) { + ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors); queue_eh_work = true; + } if (hba->errors & UIC_ERROR) { hba->uic_error = 0; - ufshcd_update_uic_error(hba); + retval = ufshcd_update_uic_error(hba); if (hba->uic_error) queue_eh_work = true; } @@ -5454,6 +5524,8 @@ static void ufshcd_check_errors(struct ufs_hba *hba) __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? 
"Enter" : "Exit", hba->errors, ufshcd_get_upmcrs(hba)); + ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err, + hba->errors); queue_eh_work = true; } @@ -5489,6 +5561,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba) } schedule_work(&hba->eh_work); } + retval |= IRQ_HANDLED; } /* * if (!queue_eh_work) - @@ -5496,44 +5569,81 @@ static void ufshcd_check_errors(struct ufs_hba *hba) * itself without s/w intervention or errors that will be * handled by the SCSI core layer. */ + return retval; +} + +struct ctm_info { + struct ufs_hba *hba; + unsigned long pending; + unsigned int ncpl; +}; + +static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved) +{ + struct ctm_info *const ci = priv; + struct completion *c; + + WARN_ON_ONCE(reserved); + if (test_bit(req->tag, &ci->pending)) + return true; + ci->ncpl++; + c = req->end_io_data; + if (c) + complete(c); + return true; } /** * ufshcd_tmc_handler - handle task management function completion * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_tmc_handler(struct ufs_hba *hba) +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) { - u32 tm_doorbell; + struct request_queue *q = hba->tmf_queue; + struct ctm_info ci = { + .hba = hba, + .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL), + }; - tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; - wake_up(&hba->tm_wq); + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci); + return ci.ncpl ? IRQ_HANDLED : IRQ_NONE; } /** * ufshcd_sl_intr - Interrupt service routine * @hba: per adapter instance * @intr_status: contains interrupts generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) { + irqreturn_t retval = IRQ_NONE; + hba->errors = UFSHCD_ERROR_MASK & intr_status; if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); if (hba->errors) - ufshcd_check_errors(hba); + retval |= ufshcd_check_errors(hba); if (intr_status & UFSHCD_UIC_MASK) - ufshcd_uic_cmd_compl(hba, intr_status); + retval |= ufshcd_uic_cmd_compl(hba, intr_status); if (intr_status & UTP_TASK_REQ_COMPL) - ufshcd_tmc_handler(hba); + retval |= ufshcd_tmc_handler(hba); if (intr_status & UTP_TRANSFER_REQ_COMPL) - ufshcd_transfer_req_compl(hba); + retval |= ufshcd_transfer_req_compl(hba); + + return retval; } /** @@ -5541,8 +5651,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) * @irq: irq number * @__hba: pointer to adapter instance * - * Returns IRQ_HANDLED - If interrupt is valid - * IRQ_NONE - If invalid interrupt + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { @@ -5565,14 +5676,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - if (enabled_intr_status) { - ufshcd_sl_intr(hba, enabled_intr_status); - retval = IRQ_HANDLED; - } + if (enabled_intr_status) + retval |= ufshcd_sl_intr(hba, enabled_intr_status); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); } while (intr_status && --retries); + if (retval == IRQ_NONE) { + 
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", + __func__, intr_status); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); + } + spin_unlock(hba->host->host_lock); return retval; } @@ -5601,7 +5716,10 @@ out: static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, struct utp_task_req_desc *treq, u8 tm_function) { + struct request_queue *q = hba->tmf_queue; struct Scsi_Host *host = hba->host; + DECLARE_COMPLETION_ONSTACK(wait); + struct request *req; unsigned long flags; int free_slot, task_tag, err; @@ -5610,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by %TM_CMD_TIMEOUT. */ - wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); + req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED); + req->end_io_data = &wait; + free_slot = req->tag; + WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); @@ -5636,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); /* wait until the task management command is completed */ - err = wait_event_timeout(hba->tm_wq, - test_bit(free_slot, &hba->tm_condition), + err = wait_for_completion_io_timeout(&wait, msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { + /* + * Make sure that ufshcd_compl_tm() does not trigger a + * use-after-free. + */ + req->end_io_data = NULL; ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); @@ -5652,16 +5777,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); - - spin_lock_irqsave(hba->host->host_lock, flags); - __clear_bit(free_slot, &hba->outstanding_tasks); - spin_unlock_irqrestore(hba->host->host_lock, flags); - } - clear_bit(free_slot, &hba->tm_condition); - ufshcd_put_tm_slot(hba, free_slot); - wake_up(&hba->tm_tag_wq); + spin_lock_irqsave(hba->host->host_lock, flags); + __clear_bit(free_slot, &hba->outstanding_tasks); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + blk_put_request(req); ufshcd_release(hba); return err; @@ -5718,9 +5840,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, * @hba: per-adapter instance * @req_upiu: upiu request * @rsp_upiu: upiu reply - * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target * @desc_buff: pointer to descriptor buffer, NULL if NA * @buff_len: descriptor size, 0 if NA + * @cmd_type: specifies the type (NOP, Query...) * @desc_op: descriptor operation * * Those type of requests uses UTP Transfer Request Descriptor - utrd. 
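[Editorial sketch] A recurring change in this hunk is that every sub-handler now reports whether it consumed an interrupt, and the top-level ISR ORs the results so a fully unclaimed status can be logged and the registers dumped, as done just above. A compact sketch of that aggregation pattern, with invented status bits purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

    /* Hypothetical status bits, standing in for UFSHCD_UIC_MASK etc. */
    #define STS_UIC (1u << 0)
    #define STS_TMF (1u << 1)

    static enum irqreturn handle_uic(uint32_t sts)
    {
            return (sts & STS_UIC) ? IRQ_HANDLED : IRQ_NONE;
    }

    static enum irqreturn handle_tmf(uint32_t sts)
    {
            return (sts & STS_TMF) ? IRQ_HANDLED : IRQ_NONE;
    }

    static enum irqreturn top_handler(uint32_t sts)
    {
            enum irqreturn ret = IRQ_NONE;

            ret |= handle_uic(sts);
            ret |= handle_tmf(sts);
            if (ret == IRQ_NONE)
                    fprintf(stderr, "unhandled interrupt 0x%08x\n", sts);
            return ret;
    }

    int main(void)
    {
            top_handler(1u << 7); /* no handler claims bit 7: logs it */
            return 0;
    }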
@@ -5734,9 +5856,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, struct utp_upiu_req *rsp_upiu, u8 *desc_buff, int *buff_len, - int cmd_type, + enum dev_cmd_type cmd_type, enum query_opcode desc_op) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err = 0; int tag; @@ -5746,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, down_read(&hba->clk_scaling_lock); - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -5814,14 +5944,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, memcpy(desc_buff, descp, resp_len); *buff_len = resp_len; } else { - dev_warn(hba->dev, "rsp size is bigger than buffer"); + dev_warn(hba->dev, + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, *buff_len); *buff_len = 0; err = -EINVAL; } } - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -5849,7 +5981,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, enum query_opcode desc_op) { int err; - int cmd_type = DEV_CMD_TYPE_QUERY; + enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; struct utp_task_req_desc treq = { { 0 }, }; int ocs_value; u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; @@ -5941,6 +6073,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) out: hba->req_abort_count = 0; + ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err); if (!err) { err = SUCCESS; } else { @@ -6034,6 +6167,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) */ scsi_print_command(hba->lrb[tag].cmd); if (!hba->req_abort_count) { + ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0); ufshcd_print_host_regs(hba); ufshcd_print_host_state(hba); ufshcd_print_pwr_info(hba); @@ -6114,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); - clear_bit_unlock(tag, &hba->lrb_in_use); - wake_up(&hba->dev_cmd.tag_wq); - out: if (!err) { err = SUCCESS; @@ -6149,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) int err; unsigned long flags; - /* Reset the host controller */ + /* + * Stop the host controller and complete the requests + * cleared by h/w + */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_hba_stop(hba, false); + hba->silence_err_logs = true; + ufshcd_complete_requests(hba); + hba->silence_err_logs = false; spin_unlock_irqrestore(hba->host->host_lock, flags); /* scale up clocks to max frequency before full reinitialization */ @@ -6162,14 +6299,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; /* Establish the link again and restore the device */ - err = ufshcd_probe_hba(hba); + err = ufshcd_probe_hba(hba, false); if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) err = -EIO; out: if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); - + ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err); return err; } @@ -6185,22 +6322,15 @@ out: static int ufshcd_reset_and_restore(struct ufs_hba *hba) { int err = 0; - unsigned long flags; int retries = MAX_HOST_RESET_RETRIES; do { + /* Reset the attached device */ + 
ufshcd_vops_device_reset(hba); + err = ufshcd_host_reset_and_restore(hba); } while (err && --retries); - /* - * After reset the door-bell might be cleared, complete - * outstanding requests in s/w here. - */ - spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); - return err; } @@ -6355,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) if (!desc_buf) return; - ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, + desc_buf, buff_len); if (ret) { dev_err(hba->dev, "%s: Failed reading power descriptor.len = %d ret = %d", @@ -6445,13 +6576,13 @@ out: return ret; } -static int ufs_get_device_desc(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) +static int ufs_get_device_desc(struct ufs_hba *hba) { int err; size_t buff_len; u8 model_index; u8 *desc_buf; + struct ufs_dev_info *dev_info = &hba->dev_info; buff_len = max_t(size_t, hba->desc_size.dev_desc, QUERY_DESC_MAX_SIZE + 1); @@ -6461,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba, goto out; } - err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf, + hba->desc_size.dev_desc); if (err) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", __func__, err); @@ -6472,45 +6604,48 @@ static int ufs_get_device_desc(struct ufs_hba *hba, * getting vendor (manufacturerID) and Bank Index in big endian * format */ - dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - /* Zero-pad entire buffer for string termination. */ - memset(desc_buf, 0, buff_len); - - err = ufshcd_read_string_desc(hba, model_index, desc_buf, - QUERY_DESC_MAX_SIZE, true/*ASCII*/); - if (err) { + err = ufshcd_read_string_desc(hba, model_index, + &dev_info->model, SD_ASCII_STD); + if (err < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. 
err = %d\n", __func__, err); goto out; } - desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); - - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; + /* + * ufshcd_read_string_desc returns size of the string + * reset the error value + */ + err = 0; out: kfree(desc_buf); return err; } -static void ufs_fixup_device_setup(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) +static void ufs_put_device_desc(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + kfree(dev_info->model); + dev_info->model = NULL; +} + +static void ufs_fixup_device_setup(struct ufs_hba *hba) { struct ufs_dev_fix *f; + struct ufs_dev_info *dev_info = &hba->dev_info; for (f = ufs_fixups; f->quirk; f++) { - if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || - f->card.wmanufacturerid == UFS_ANY_VENDOR) && - (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || - !strcmp(f->card.model, UFS_ANY_MODEL))) + if ((f->wmanufacturerid == dev_info->wmanufacturerid || + f->wmanufacturerid == UFS_ANY_VENDOR) && + ((dev_info->model && + STR_PRFX_EQUAL(f->model, dev_info->model)) || + !strcmp(f->model, UFS_ANY_MODEL))) hba->dev_quirks |= f->quirk; } } @@ -6681,17 +6816,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) { - int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist); - hba->ufs_stats.hibern8_exit_cnt = 0; hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); - - memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size); - hba->req_abort_count = 0; } @@ -6728,21 +6854,42 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba) &hba->desc_size.geom_desc); if (err) hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, &hba->desc_size.hlth_desc); if (err) hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; } -static void ufshcd_def_desc_sizes(struct ufs_hba *hba) +static int ufshcd_device_geo_params_init(struct ufs_hba *hba) { - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; + int err; + size_t buff_len; + u8 *desc_buf; + + buff_len = hba->desc_size.geom_desc; + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0, + desc_buf, buff_len); + if (err) { + dev_err(hba->dev, "%s: Failed reading Geometry Desc. 
err = %d\n", + __func__, err); + goto out; + } + + if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) + hba->dev_info.max_lu_supported = 32; + else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) + hba->dev_info.max_lu_supported = 8; + +out: + kfree(desc_buf); + return err; } static struct ufs_ref_clk ufs_ref_clk_freqs[] = { @@ -6813,15 +6960,92 @@ out: return err; } +static int ufshcd_device_params_init(struct ufs_hba *hba) +{ + bool flag; + int ret; + + /* Clear any previous UFS device information */ + memset(&hba->dev_info, 0, sizeof(hba->dev_info)); + + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); + + /* Init UFS geometry descriptor related parameters */ + ret = ufshcd_device_geo_params_init(hba); + if (ret) + goto out; + + /* Check and apply UFS device quirks */ + ret = ufs_get_device_desc(hba); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, ret); + goto out; + } + + ufs_fixup_device_setup(hba); + + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + hba->dev_info.f_power_on_wp_en = flag; + + /* Probe maximum power mode co-supported by both UFS host and device */ + if (ufshcd_get_max_pwr_mode(hba)) + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); +out: + return ret; +} + +/** + * ufshcd_add_lus - probe and add UFS logical units + * @hba: per-adapter instance + */ +static int ufshcd_add_lus(struct ufs_hba *hba) +{ + int ret; + + ufshcd_init_icc_levels(hba); + + /* Add required well known logical units to scsi mid layer */ + ret = ufshcd_scsi_add_wlus(hba); + if (ret) + goto out; + + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; + if (!hba->devfreq) { + ret = ufshcd_devfreq_init(hba); + if (ret) + goto out; + } + + hba->clk_scaling.is_allowed = true; + } + + ufs_bsg_probe(hba); + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + +out: + return ret; +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance + * @async: asynchronous execution or not * * Execute link-startup and verify device initialization */ -static int ufshcd_probe_hba(struct ufs_hba *hba) +static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) { - struct ufs_dev_desc card = {0}; int ret; ktime_t start = ktime_get(); @@ -6839,28 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* UniPro link is active now */ ufshcd_set_link_active(hba); - /* Enable Auto-Hibernate if configured */ - ufshcd_auto_hibern8_enable(hba); - + /* Verify device initialization by sending NOP OUT UPIU */ ret = ufshcd_verify_dev_init(hba); if (ret) goto out; + /* Initiate UFS initialization, and waiting until completion */ ret = ufshcd_complete_dev_init(hba); if (ret) goto out; - /* Init check for device descriptor sizes */ - ufshcd_init_desc_sizes(hba); - - ret = ufs_get_device_desc(hba, &card); - if (ret) { - dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", - __func__, ret); - goto out; + /* + * Initialize UFS device parameters used by driver, these + * parameters are associated with UFS descriptors. 
+ */ + if (async) { + ret = ufshcd_device_params_init(hba); + if (ret) + goto out; } - ufs_fixup_device_setup(hba, &card); ufshcd_tune_unipro_params(hba); /* UFS device is also active now */ @@ -6868,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_force_reset_auto_bkops(hba); hba->wlun_dev_clr_ua = true; - if (ufshcd_get_max_pwr_mode(hba)) { - dev_err(hba->dev, - "%s: Failed getting max supported power mode\n", - __func__); - } else { + /* Gear up to HS gear if supported */ + if (hba->max_pwr_info.is_valid) { /* * Set the right value to bRefClkFreq before attempting to * switch to HS gears. @@ -6890,59 +7109,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* set the state as operational after switching to desired gear */ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - /* - * If we are in error handling context or in power management callbacks - * context, no need to scan the host - */ - if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { - bool flag; - - /* clear any previous UFS device information */ - memset(&hba->dev_info, 0, sizeof(hba->dev_info)); - if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) - hba->dev_info.f_power_on_wp_en = flag; - - if (!hba->is_init_prefetch) - ufshcd_init_icc_levels(hba); - - /* Add required well known logical units to scsi mid layer */ - if (ufshcd_scsi_add_wlus(hba)) - goto out; - - /* Initialize devfreq after UFS device is detected */ - if (ufshcd_is_clkscaling_supported(hba)) { - memcpy(&hba->clk_scaling.saved_pwr_info.info, - &hba->pwr_info, - sizeof(struct ufs_pa_layer_attr)); - hba->clk_scaling.saved_pwr_info.is_valid = true; - if (!hba->devfreq) { - ret = ufshcd_devfreq_init(hba); - if (ret) - goto out; - } - hba->clk_scaling.is_allowed = true; - } - - ufs_bsg_probe(hba); - - scsi_scan_host(hba->host); - pm_runtime_put_sync(hba->dev); - } - - if (!hba->is_init_prefetch) - hba->is_init_prefetch = true; + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); out: - /* - * If we failed to initialize the device or the device is not - * present, turn off the power/clocks etc. 
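[Editorial sketch] ufs_fixup_device_setup(), reworked earlier in this patch to read from hba->dev_info, is a sentinel-terminated table scan that matches on manufacturer ID and a model-name prefix. A self-contained sketch of that matching logic; every name below is a stand-in, not the driver's:

    #include <stdint.h>
    #include <string.h>

    #define ANY_VENDOR 0xffff
    #define ANY_MODEL  ""  /* stand-ins for UFS_ANY_VENDOR/UFS_ANY_MODEL */

    struct dev_fix {
            uint16_t wmanufacturerid;
            const char *model; /* prefix-matched against the device */
            uint32_t quirk;    /* 0 terminates the table */
    };

    /* Prefix match, mirroring STR_PRFX_EQUAL in the driver. */
    static int model_matches(const char *pattern, const char *model)
    {
            if (!strcmp(pattern, ANY_MODEL))
                    return 1;
            return model && !strncmp(pattern, model, strlen(pattern));
    }

    static uint32_t collect_quirks(const struct dev_fix *fixups,
                                   uint16_t vendor, const char *model)
    {
            uint32_t quirks = 0;

            for (const struct dev_fix *f = fixups; f->quirk; f++)
                    if ((f->wmanufacturerid == vendor ||
                         f->wmanufacturerid == ANY_VENDOR) &&
                        model_matches(f->model, model))
                            quirks |= f->quirk;
            return quirks;
    }

    int main(void)
    {
            static const struct dev_fix fixups[] = {
                    { 0x1337, "EXAMPLE-MODEL", 1u << 0 }, /* hypothetical */
                    { ANY_VENDOR, ANY_MODEL,   1u << 1 },
                    { 0, 0, 0 },                          /* sentinel */
            };
            return collect_quirks(fixups, 0x1337, "EXAMPLE-MODEL-V2") != 3;
    }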
- */ - if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { - pm_runtime_put_sync(hba->dev); - ufshcd_exit_clk_scaling(hba); - ufshcd_hba_exit(hba); - } trace_ufshcd_init(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), @@ -6958,43 +7128,25 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + int ret; - ufshcd_probe_hba(hba); -} - -static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) -{ - unsigned long flags; - struct Scsi_Host *host; - struct ufs_hba *hba; - int index; - bool found = false; - - if (!scmd || !scmd->device || !scmd->device->host) - return BLK_EH_DONE; - - host = scmd->device->host; - hba = shost_priv(host); - if (!hba) - return BLK_EH_DONE; - - spin_lock_irqsave(host->host_lock, flags); - - for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[index].cmd == scmd) { - found = true; - break; - } - } - - spin_unlock_irqrestore(host->host_lock, flags); + /* Initialize hba, detect and initialize UFS device */ + ret = ufshcd_probe_hba(hba, true); + if (ret) + goto out; + /* Probe and add UFS logical units */ + ret = ufshcd_add_lus(hba); +out: /* - * Bypass SCSI error handling and reset the block layer timer if this - * SCSI command was not actually dispatched to UFS driver, otherwise - * let SCSI layer handle the error as usual. + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. */ - return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER; + if (ret) { + pm_runtime_put_sync(hba->dev); + ufshcd_exit_clk_scaling(hba); + ufshcd_hba_exit(hba); + } } static const struct attribute_group *ufshcd_driver_groups[] = { @@ -7015,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = { .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, .eh_host_reset_handler = ufshcd_eh_host_reset_handler, - .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -7025,6 +7176,7 @@ static struct scsi_host_template ufshcd_driver_template = { .track_queue_depth = 1, .sdev_groups = ufshcd_driver_groups, .dma_boundary = PAGE_SIZE - 1, + .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, }; static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, @@ -7062,6 +7214,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } @@ -7450,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_setup_clocks(hba, false); ufshcd_setup_hba_vreg(hba, false); hba->is_powered = false; + ufs_put_device_desc(hba); } } @@ -7577,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba, * turning off the link would also turn off the device. */ else if ((req_link_state == UIC_LINK_OFF_STATE) && - (!check_for_bkops || (check_for_bkops && - !hba->auto_bkops_enabled))) { + (!check_for_bkops || !hba->auto_bkops_enabled)) { /* * Let's make sure that link is in low power mode, we are doing * this currently by putting the link in Hibern8. Otherway to @@ -7784,6 +7939,11 @@ disable_clks: ret = ufshcd_vops_suspend(hba, pm_op); if (ret) goto set_link_active; + /* + * Disable the host irq as host controller as there won't be any + * host controller transaction expected till resume. 
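[Editorial sketch] Earlier in this patch, ufshcd_reset_and_restore() was changed to pulse the device reset line before every attempt rather than once, since a failed host reset can leave the device in an undefined state. The shape is a bounded do/while; a runnable toy version with stubbed callbacks (all names and the simulated fault are this editor's):

    #include <stdio.h>

    #define MAX_RESET_RETRIES 3 /* stand-in for MAX_HOST_RESET_RETRIES */

    struct ctrl { int flaky_attempts_left; };

    static void device_reset(struct ctrl *c)
    {
            (void)c; /* would pulse the device reset line */
    }

    static int host_reset_and_restore(struct ctrl *c)
    {
            /* Simulated: fail until the injected fault clears. */
            return c->flaky_attempts_left-- > 0 ? -5 /* -EIO */ : 0;
    }

    static int reset_and_restore(struct ctrl *c)
    {
            int retries = MAX_RESET_RETRIES;
            int err;

            do {
                    device_reset(c); /* reset device before *every* try */
                    err = host_reset_and_restore(c);
            } while (err && --retries);
            return err;
    }

    int main(void)
    {
            struct ctrl c = { .flaky_attempts_left = 1 };

            printf("result: %d\n", reset_and_restore(&c)); /* prints 0 */
            return 0;
    }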
+ */ + ufshcd_disable_irq(hba); if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); @@ -7793,11 +7953,7 @@ disable_clks: hba->clk_gating.state = CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - /* - * Disable the host irq as host controller as there won't be any - * host controller transaction expected till resume. - */ - ufshcd_disable_irq(hba); + /* Put the host controller in low power mode if possible */ ufshcd_hba_vreg_set_lpm(hba); goto out; @@ -7820,6 +7976,8 @@ enable_gating: ufshcd_release(hba); out: hba->pm_op_in_progress = 0; + if (ret) + ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret); return ret; } @@ -7848,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto out; /* enable the host irq as host controller would be active soon */ - ret = ufshcd_enable_irq(hba); - if (ret) - goto disable_irq_and_vops_clks; + ufshcd_enable_irq(hba); ret = ufshcd_vreg_set_hpm(hba); if (ret) @@ -7901,12 +8057,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (hba->clk_scaling.is_allowed) ufshcd_resume_clkscaling(hba); - /* Schedule clock gating in case of no access to UFS device yet */ - ufshcd_release(hba); - /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + /* Schedule clock gating in case of no access to UFS device yet */ + ufshcd_release(hba); + goto out; set_old_link_state: @@ -7922,6 +8078,8 @@ disable_irq_and_vops_clks: ufshcd_setup_clocks(hba, false); out: hba->pm_op_in_progress = 0; + if (ret) + ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret); return ret; } @@ -8092,6 +8250,9 @@ int ufshcd_shutdown(struct ufs_hba *hba) { int ret = 0; + if (!hba->is_powered) + goto out; + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) goto out; @@ -8119,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba) { ufs_bsg_remove(hba); ufs_sysfs_remove_nodes(hba->dev); + blk_cleanup_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); + blk_cleanup_queue(hba->cmd_queue); scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); @@ -8197,6 +8361,18 @@ out_error: } EXPORT_SYMBOL(ufshcd_alloc_host); +/* This function exists because blk_mq_alloc_tag_set() requires this. 
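[Editorial sketch] The ufshcd_queue_tmf() stub defined just below exists only because blk_mq_alloc_tag_set() requires a .queue_rq callback; the TMF queue never sees real block-layer traffic and serves purely as a tag allocator whose depth matches the controller's UTMR slots. A kernel-context sketch of the allocate/issue/free cycle, assuming a tmf_queue set up as in this patch (not standalone-buildable; it mirrors the calls used here, with the IS_ERR check as an editorial addition):

    #include <linux/blk-mq.h>
    #include <linux/completion.h>

    /* Borrow a tag: the tag number doubles as the UTMRD slot index. */
    static int tmf_slot_example(struct request_queue *tmf_queue)
    {
            DECLARE_COMPLETION_ONSTACK(wait);
            struct request *req;
            int slot;

            req = blk_get_request(tmf_queue, REQ_OP_DRV_OUT,
                                  BLK_MQ_REQ_RESERVED);
            if (IS_ERR(req))
                    return PTR_ERR(req);

            slot = req->tag;          /* 0 <= slot < hba->nutmrs */
            req->end_io_data = &wait; /* completed from ufshcd_compl_tm() */

            /* ... program UTMRD 'slot' and ring the task doorbell ... */
            wait_for_completion_io(&wait);

            blk_put_request(req);     /* return the tag */
            return 0;
    }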
*/ +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) +{ + WARN_ON_ONCE(true); + return BLK_STS_NOTSUPP; +} + +static const struct blk_mq_ops ufshcd_tmf_ops = { + .queue_rq = ufshcd_queue_tmf, +}; + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -8220,9 +8396,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->mmio_base = mmio_base; hba->irq = irq; - /* Set descriptor lengths to specification defaults */ - ufshcd_def_desc_sizes(hba); - err = ufshcd_hba_init(hba); if (err) goto out_error; @@ -8269,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->max_pwr_info.is_valid = false; - /* Initailize wait queue for task management */ - init_waitqueue_head(&hba->tm_wq); - init_waitqueue_head(&hba->tm_tag_wq); - /* Initialize work queues */ INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); @@ -8285,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) init_rwsem(&hba->clk_scaling_lock); - /* Initialize device management tag acquire wait queue */ - init_waitqueue_head(&hba->dev_cmd.tag_wq); - ufshcd_init_clk_gating(hba); ufshcd_init_clk_scaling(hba); @@ -8321,13 +8487,37 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto exit_gating; } + hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); + if (IS_ERR(hba->cmd_queue)) { + err = PTR_ERR(hba->cmd_queue); + goto out_remove_scsi_host; + } + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto free_cmd_queue; + hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + + /* Reset the attached device */ + ufshcd_vops_device_reset(hba); + /* Host controller enable */ err = ufshcd_hba_enable(hba); if (err) { dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_host_regs(hba); ufshcd_print_host_state(hba); - goto out_remove_scsi_host; + goto free_tmf_queue; } /* @@ -8364,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) return 0; +free_tmf_queue: + blk_cleanup_queue(hba->tmf_queue); +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); +free_cmd_queue: + blk_cleanup_queue(hba->cmd_queue); out_remove_scsi_host: scsi_remove_host(hba->host); exit_gating: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 994d73d03207..2ae6c7c8528c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -212,13 +212,11 @@ struct ufs_query { * @type: device management command type - Query, NOP OUT * @lock: lock to allow one command at a time * @complete: internal commands completion - * @tag_wq: wait queue until free command slot is available */ struct ufs_dev_cmd { enum dev_cmd_type type; struct mutex lock; struct completion *complete; - wait_queue_head_t tag_wq; struct ufs_query query; }; @@ -298,6 +296,7 @@ struct ufs_pwr_mode_info { * @resume: called during host controller PM callback * @dbg_register_dump: used to dump controller debug information * @phy_initialization: used to initialize phys + * @device_reset: called to issue a reset pulse on the UFS device */ struct ufs_hba_variant_ops { const char 
*name;
@@ -321,11 +320,12 @@ struct ufs_hba_variant_ops {
 	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
 	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
 					enum ufs_notify_change_status);
-	int	(*apply_dev_quirks)(struct ufs_hba *);
+	int	(*apply_dev_quirks)(struct ufs_hba *hba);
 	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
 	int	(*phy_initialization)(struct ufs_hba *);
+	void	(*device_reset)(struct ufs_hba *hba);
 };

 /* clock gating state */
@@ -412,17 +412,17 @@ struct ufs_init_prefetch {
 	u32 icc_level;
 };

-#define UIC_ERR_REG_HIST_LENGTH 8
+#define UFS_ERR_REG_HIST_LENGTH 8
 /**
- * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * struct ufs_err_reg_hist - keeps history of errors
  * @pos: index to indicate cyclic buffer position
  * @reg: cyclic buffer for registers value
  * @tstamp: cyclic buffer for time stamp
  */
-struct ufs_uic_err_reg_hist {
+struct ufs_err_reg_hist {
 	int pos;
-	u32 reg[UIC_ERR_REG_HIST_LENGTH];
-	ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+	u32 reg[UFS_ERR_REG_HIST_LENGTH];
+	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
 };

 /**
@@ -436,15 +436,37 @@ struct ufs_uic_err_reg_hist {
  * @nl_err: tracks nl-uic errors
  * @tl_err: tracks tl-uic errors
  * @dme_err: tracks dme errors
+ * @auto_hibern8_err: tracks auto-hibernate errors
+ * @fatal_err: tracks fatal errors
+ * @link_startup_err: tracks link-startup errors
+ * @resume_err: tracks resume errors
+ * @suspend_err: tracks suspend errors
+ * @dev_reset: tracks device reset events
+ * @host_reset: tracks host reset events
+ * @task_abort: tracks task abort events
  */
 struct ufs_stats {
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
-	struct ufs_uic_err_reg_hist pa_err;
-	struct ufs_uic_err_reg_hist dl_err;
-	struct ufs_uic_err_reg_hist nl_err;
-	struct ufs_uic_err_reg_hist tl_err;
-	struct ufs_uic_err_reg_hist dme_err;
+
+	/* uic specific errors */
+	struct ufs_err_reg_hist pa_err;
+	struct ufs_err_reg_hist dl_err;
+	struct ufs_err_reg_hist nl_err;
+	struct ufs_err_reg_hist tl_err;
+	struct ufs_err_reg_hist dme_err;
+
+	/* fatal errors */
+	struct ufs_err_reg_hist auto_hibern8_err;
+	struct ufs_err_reg_hist fatal_err;
+	struct ufs_err_reg_hist link_startup_err;
+	struct ufs_err_reg_hist resume_err;
+	struct ufs_err_reg_hist suspend_err;
+
+	/* abnormal events */
+	struct ufs_err_reg_hist dev_reset;
+	struct ufs_err_reg_hist host_reset;
+	struct ufs_err_reg_hist task_abort;
 };

 /**
@@ -459,7 +481,7 @@ struct ufs_stats {
  * @host: Scsi_Host instance of the driver
  * @dev: device handle
  * @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
  * @outstanding_tasks: Bits representing outstanding task requests
  * @outstanding_reqs: Bits representing outstanding transfer requests
  * @capabilities: UFS Controller Capabilities
@@ -471,17 +493,14 @@ struct ufs_stats {
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
- * @tm_wq: wait queue for task management
- * @tm_tag_wq: wait queue for free task management slots
- * @tm_slots_in_use: bit map of task management request slots in use
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
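[Editorial sketch] For the expanded ufs_stats histories above: since pos always points at the slot that will be overwritten next, it is also the oldest entry once the buffer has wrapped, so a chronological dump starts there. A sketch of such a walk, mirroring how the driver's print helpers skip never-written slots (here by timestamp rather than by register value):

    #include <stdio.h>
    #include <stdint.h>

    #define HIST_LENGTH 8

    struct err_reg_hist {
            int pos;
            uint32_t reg[HIST_LENGTH];
            uint64_t tstamp[HIST_LENGTH];
    };

    /* Oldest-to-newest walk: once wrapped, 'pos' is the oldest slot. */
    static void dump_hist(const struct err_reg_hist *h)
    {
            for (int i = 0; i < HIST_LENGTH; i++) {
                    int p = (h->pos + i) % HIST_LENGTH;

                    if (!h->tstamp[p]) /* slot never written; skip it */
                            continue;
                    printf("reg=0x%08x at %llu ns\n", h->reg[p],
                           (unsigned long long)h->tstamp[p]);
            }
    }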
* @pwr_done: completion for power mode change - * @tm_condition: condition variable for task management * @ufshcd_state: UFSHCD states * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask * @is_powered: flag to check if HBA is powered - * @is_init_prefetch: flag to check if data was pre-fetched in initialization * @init_prefetch_data: data pre-fetched during initialization * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events @@ -489,6 +508,7 @@ struct ufs_stats { * @uic_error: UFS interconnect layer error status * @saved_err: sticky error mask * @saved_uic_err: sticky UIC error mask + * @silence_err_logs: flag to silence error logs * @dev_cmd: ufs device management command information * @last_dme_cmd_tstamp: time stamp of the last completed DME command * @auto_bkops_enabled: to track whether bkops is enabled in device @@ -517,6 +537,7 @@ struct ufs_hba { struct Scsi_Host *host; struct device *dev; + struct request_queue *cmd_queue; /* * This field is to keep a reference to "scsi_device" corresponding to * "UFS device" W-LU. @@ -537,7 +558,6 @@ struct ufs_hba { u32 ahit; struct ufshcd_lrb *lrb; - unsigned long lrb_in_use; unsigned long outstanding_tasks; unsigned long outstanding_reqs; @@ -619,10 +639,8 @@ struct ufs_hba { /* Device deviations from standard UFS device spec. */ unsigned int dev_quirks; - wait_queue_head_t tm_wq; - wait_queue_head_t tm_tag_wq; - unsigned long tm_condition; - unsigned long tm_slots_in_use; + struct blk_mq_tag_set tmf_tag_set; + struct request_queue *tmf_queue; struct uic_command *active_uic_cmd; struct mutex uic_cmd_mutex; @@ -633,7 +651,6 @@ struct ufs_hba { u32 intr_mask; u16 ee_ctrl_mask; bool is_powered; - bool is_init_prefetch; struct ufs_init_prefetch init_prefetch_data; /* Work Queues */ @@ -646,6 +663,7 @@ struct ufs_hba { u32 saved_err; u32 saved_uic_err; struct ufs_stats ufs_stats; + bool silence_err_logs; /* Device management request data */ struct ufs_dev_cmd dev_cmd; @@ -692,6 +710,12 @@ struct ufs_hba { * the performance of ongoing read/write operations. */ #define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5) + /* + * This capability allows host controller driver to automatically + * enable runtime power management by itself instead of waiting + * for userspace to control the power management. 
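[Editorial sketch] UFSHCD_CAP_RPM_AUTOSUSPEND, documented above, is an opt-in: a host driver sets it before ufshcd_slave_configure() runs, which then flags each SCSI device for runtime autosuspend with the template's RPM_AUTOSUSPEND_DELAY_MS delay. A hypothetical glue-driver init vop; the function name is invented, only the caps bit comes from this patch:

    /* Hypothetical platform glue: opt the host into runtime autosuspend.
     * ufshcd_slave_configure() will then set sdev->rpm_autosuspend for
     * each LU it configures.
     */
    static int example_hba_init(struct ufs_hba *hba)
    {
            hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
            return 0;
    }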
+ */ +#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -725,6 +749,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) { return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; } +static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND; +} static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) { @@ -769,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) int ufshcd_alloc_host(struct device *, struct ufs_hba **); void ufshcd_dealloc_host(struct ufs_hba *); +int ufshcd_hba_enable(struct ufs_hba *hba); int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); +int ufshcd_make_hba_operational(struct ufs_hba *hba); void ufshcd_remove(struct ufs_hba *); +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, u32 val, unsigned long interval_us, unsigned long timeout_ms, bool can_sleep); void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); +void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, + u32 reg); static inline void check_upiu_size(void) { @@ -891,8 +924,14 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii); + +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba); +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); + +#define SD_ASCII_STD true +#define SD_RAW false +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); @@ -1045,6 +1084,14 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) hba->vops->dbg_register_dump(hba); } +static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->device_reset) { + hba->vops->device_reset(hba); + ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0); + } +} + extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /* diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index dbb75cd28dc8..c2961d37cc1c 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -195,7 +195,7 @@ enum { /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ #define UIC_DATA_LINK_LAYER_ERROR 0x80000000 -#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF +#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF #define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2 #define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4 #define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8 diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index f539f873f94d..3dc4d8b76509 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -161,6 +161,17 @@ /* PHY Adapter Protocol Constants */ #define PA_MAXDATALANES 4 +#define DL_FC0ProtectionTimeOutVal_Default 8191 +#define DL_TC0ReplayTimeOutVal_Default 65535 +#define DL_AFC0ReqTimeOutVal_Default 32767 +#define DL_FC1ProtectionTimeOutVal_Default 8191 +#define DL_TC1ReplayTimeOutVal_Default 65535 +#define DL_AFC1ReqTimeOutVal_Default 32767 + +#define DME_LocalFC0ProtectionTimeOutVal 0xD041 +#define 
DME_LocalTC0ReplayTimeOutVal	0xD042
+#define DME_LocalAFC0ReqTimeOutVal	0xD043
+
 /* PA power modes */
 enum {
 	FAST_MODE	= 1,
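[Editorial sketch] With ufshcd_auto_hibern8_update() exported earlier in this patch, vendor code can retune the idle timer at runtime. Per UFSHCI, the AHIT register packs a 10-bit timer value with a 3-bit timescale at bit 10; the sketch below composes a value by hand rather than with driver macros, so treat the bit layout as this editor's reading of the spec:

    #include <stdio.h>
    #include <stdint.h>

    /* Compose an Auto-Hibernate Idle Timer (AHIT) value: a 10-bit count
     * plus a 3-bit timescale at bit 10 (0 = 1 us ... 3 = 1 ms).
     */
    static uint32_t make_ahit(uint32_t value, uint32_t scale)
    {
            return (value & 0x3ff) | ((scale & 0x7) << 10);
    }

    int main(void)
    {
            /* 150 units at scale 3 (1 ms each) -> 150 ms idle timer */
            printf("ahit = 0x%08x\n", make_ahit(150, 3));
            return 0;
    }

A caller would then pass the result to ufshcd_auto_hibern8_update(hba, ahit), which, as shown in this patch, writes REG_AUTO_HIBERNATE_IDLE_TIMER under the host lock when the controller advertises MASK_AUTO_HIBERN8_SUPPORT.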