Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r-- | drivers/scsi/ufs/Kconfig         |    2
-rw-r--r-- | drivers/scsi/ufs/ufs.h           |  132
-rw-r--r-- | drivers/scsi/ufs/ufshcd-pci.c    |   55
-rw-r--r-- | drivers/scsi/ufs/ufshcd-pltfrm.c |  291
-rw-r--r-- | drivers/scsi/ufs/ufshcd.c        | 2514
-rw-r--r-- | drivers/scsi/ufs/ufshcd.h        |  280
-rw-r--r-- | drivers/scsi/ufs/ufshci.h        |    9
-rw-r--r-- | drivers/scsi/ufs/unipro.h        |   56
8 files changed, 3053 insertions, 286 deletions
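
Most of the 3053 added lines land in ufshcd.c, which gains clock gating (ufshcd_hold()/ufshcd_release()), devfreq-based clock scaling, regulator control, link hibern8 handling, and system/runtime PM support. As a minimal sketch of the gating pattern the patch threads through every register-access path (the function name below is hypothetical, not a hunk from the diff):

	/*
	 * Sketch only: each issuing path takes a gating reference before
	 * touching host registers and drops it when done. With
	 * async == true, ufshcd_hold() returns -EAGAIN instead of
	 * blocking while the ungate work runs.
	 */
	static int example_issue_request(struct ufs_hba *hba)
	{
		int err;

		/* Ungate clocks; may also bring the link out of hibern8. */
		err = ufshcd_hold(hba, false);
		if (err)
			return err;

		/* ... program the doorbell and wait for completion ... */

		/*
		 * Drop the reference; once there are no active requests the
		 * delayed gate_work (default 150 ms, tunable via the
		 * clkgate_delay_ms sysfs attribute) gates the clocks again.
		 */
		ufshcd_release(hba);
		return 0;
	}
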
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index f07f90179bbc..6e07b2afddeb 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -35,6 +35,8 @@ config SCSI_UFSHCD tristate "Universal Flash Storage Controller Driver Core" depends on SCSI && SCSI_DMA + select PM_DEVFREQ + select DEVFREQ_GOV_SIMPLE_ONDEMAND ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index fafcf5e354c6..42c459a9d3fe 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -49,6 +49,27 @@ #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ (byte1 << 8) | (byte0)) +/* + * UFS device may have standard LUs and LUN id could be from 0x00 to + * 0x7F. Standard LUs use "Peripheral Device Addressing Format". + * UFS device may also have the Well Known LUs (also referred as W-LU) + * which again could be from 0x00 to 0x7F. For W-LUs, device only use + * the "Extended Addressing Format" which means the W-LUNs would be + * from 0xc100 (SCSI_W_LUN_BASE) onwards. + * This means max. LUN number reported from UFS device could be 0xC17F. + */ +#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F +#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) +#define UFS_UPIU_WLUN_ID (1 << 7) +#define UFS_UPIU_MAX_GENERAL_LUN 8 + +/* Well known logical unit id in LUN field of UPIU */ +enum { + UFS_UPIU_REPORT_LUNS_WLUN = 0x81, + UFS_UPIU_UFS_DEVICE_WLUN = 0xD0, + UFS_UPIU_BOOT_WLUN = 0xB0, + UFS_UPIU_RPMB_WLUN = 0xC4, +}; /* * UFS Protocol Information Unit related definitions @@ -108,11 +129,13 @@ enum { /* Flag idn for Query Requests*/ enum flag_idn { QUERY_FLAG_IDN_FDEVICEINIT = 0x01, + QUERY_FLAG_IDN_PWR_ON_WPE = 0x03, QUERY_FLAG_IDN_BKOPS_EN = 0x04, }; /* Attribute idn for Query requests */ enum attr_idn { + QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, QUERY_ATTR_IDN_EE_CONTROL = 0x0D, QUERY_ATTR_IDN_EE_STATUS = 0x0E, @@ -129,10 +152,29 @@ enum desc_idn { QUERY_DESC_IDN_RFU_1 = 0x6, QUERY_DESC_IDN_GEOMETRY = 0x7, QUERY_DESC_IDN_POWER = 0x8, - QUERY_DESC_IDN_RFU_2 = 0x9, + QUERY_DESC_IDN_MAX, +}; + +enum desc_header_offset { + QUERY_DESC_LENGTH_OFFSET = 0x00, + QUERY_DESC_DESC_TYPE_OFFSET = 0x01, +}; + +enum ufs_desc_max_size { + QUERY_DESC_DEVICE_MAX_SIZE = 0x1F, + QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, + QUERY_DESC_UNIT_MAX_SIZE = 0x23, + QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, + /* + * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes + * of descriptor header. 
+ */ + QUERY_DESC_STRING_MAX_SIZE = 0xFE, + QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44, + QUERY_DESC_POWER_MAX_SIZE = 0x62, + QUERY_DESC_RFU_MAX_SIZE = 0x00, }; -#define UNIT_DESC_MAX_SIZE 0x22 /* Unit descriptor parameters offsets in bytes*/ enum unit_desc_param { UNIT_DESC_PARAM_LEN = 0x0, @@ -153,6 +195,43 @@ enum unit_desc_param { UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, }; +/* + * Logical Unit Write Protect + * 00h: LU not write protected + * 01h: LU write protected when fPowerOnWPEn =1 + * 02h: LU permanently write protected when fPermanentWPEn =1 + */ +enum ufs_lu_wp_type { + UFS_LU_NO_WP = 0x00, + UFS_LU_POWER_ON_WP = 0x01, + UFS_LU_PERM_WP = 0x02, +}; + +/* bActiveICCLevel parameter current units */ +enum { + UFSHCD_NANO_AMP = 0, + UFSHCD_MICRO_AMP = 1, + UFSHCD_MILI_AMP = 2, + UFSHCD_AMP = 3, +}; + +#define POWER_DESC_MAX_SIZE 0x62 +#define POWER_DESC_MAX_ACTV_ICC_LVLS 16 + +/* Attribute bActiveICCLevel parameter bit masks definitions */ +#define ATTR_ICC_LVL_UNIT_OFFSET 14 +#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET) +#define ATTR_ICC_LVL_VALUE_MASK 0x3FF + +/* Power descriptor parameters offsets in bytes */ +enum power_desc_param_offset { + PWR_DESC_LEN = 0x0, + PWR_DESC_TYPE = 0x1, + PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2, + PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22, + PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42, +}; + /* Exception event mask values */ enum { MASK_EE_STATUS = 0xFFFF, @@ -160,11 +239,12 @@ enum { }; /* Background operation status */ -enum { +enum bkops_status { BKOPS_STATUS_NO_OP = 0x0, BKOPS_STATUS_NON_CRITICAL = 0x1, BKOPS_STATUS_PERF_IMPACT = 0x2, BKOPS_STATUS_CRITICAL = 0x3, + BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL, }; /* UTP QUERY Transaction Specific Fields OpCode */ @@ -225,6 +305,14 @@ enum { UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, }; + +/* UFS device power modes */ +enum ufs_dev_pwr_mode { + UFS_ACTIVE_PWR_MODE = 1, + UFS_SLEEP_PWR_MODE = 2, + UFS_POWERDOWN_PWR_MODE = 3, +}; + /** * struct utp_upiu_header - UPIU header structure * @dword_0: UPIU header DW-0 @@ -362,4 +450,42 @@ struct ufs_query_res { struct utp_upiu_query upiu_res; }; +#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */ +#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */ +#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */ +#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */ +#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */ +#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */ +#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ +#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ + +/* + * VCCQ & VCCQ2 current requirement when UFS device is in sleep state + * and link is in Hibern8 state. 
+ */ +#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */ + +struct ufs_vreg { + struct regulator *reg; + const char *name; + bool enabled; + int min_uV; + int max_uV; + int min_uA; + int max_uA; +}; + +struct ufs_vreg_info { + struct ufs_vreg *vcc; + struct ufs_vreg *vccq; + struct ufs_vreg *vccq2; + struct ufs_vreg *vdd_hba; +}; + +struct ufs_dev_info { + bool f_power_on_wp_en; + /* Keeps information if any of the LU is power on write protected */ + bool is_lu_power_on_wp; +}; + #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index afaabe2aeac8..955ed5587011 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -43,34 +43,24 @@ * @pdev: pointer to PCI device handle * @state: power state * - * Returns -ENOSYS + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pci_suspend(struct device *dev) { - /* - * TODO: - * 1. Call ufshcd_suspend - * 2. Do bus specific power management - */ - - return -ENOSYS; + return ufshcd_system_suspend(dev_get_drvdata(dev)); } /** * ufshcd_pci_resume - resume power management function * @pdev: pointer to PCI device handle * - * Returns -ENOSYS + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pci_resume(struct device *dev) { - /* - * TODO: - * 1. Call ufshcd_resume. - * 2. Do bus specific wake up - */ - - return -ENOSYS; + return ufshcd_system_resume(dev_get_drvdata(dev)); } #else #define ufshcd_pci_suspend NULL @@ -80,30 +70,15 @@ static int ufshcd_pci_resume(struct device *dev) #ifdef CONFIG_PM_RUNTIME static int ufshcd_pci_runtime_suspend(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_suspend(hba); + return ufshcd_runtime_suspend(dev_get_drvdata(dev)); } static int ufshcd_pci_runtime_resume(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_resume(hba); + return ufshcd_runtime_resume(dev_get_drvdata(dev)); } static int ufshcd_pci_runtime_idle(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_idle(hba); + return ufshcd_runtime_idle(dev_get_drvdata(dev)); } #else /* !CONFIG_PM_RUNTIME */ #define ufshcd_pci_runtime_suspend NULL @@ -117,7 +92,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev) */ static void ufshcd_pci_shutdown(struct pci_dev *pdev) { - ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); + ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); } /** @@ -164,7 +139,15 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mmio_base = pcim_iomap_table(pdev)[0]; - err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); + err = ufshcd_alloc_host(&pdev->dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + return err; + } + + INIT_LIST_HEAD(&hba->clk_list_head); + + err = ufshcd_init(hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); return err; diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 5e4623225422..8adf067ff019 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -35,53 +35,236 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/of.h> #include "ufshcd.h" +static const struct of_device_id ufs_of_match[]; +static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev) +{ + if (dev->of_node) { + const 
struct of_device_id *match; + + match = of_match_node(ufs_of_match, dev->of_node); + if (match) + return (struct ufs_hba_variant_ops *)match->data; + } + + return NULL; +} + +static int ufshcd_parse_clock_info(struct ufs_hba *hba) +{ + int ret = 0; + int cnt; + int i; + struct device *dev = hba->dev; + struct device_node *np = dev->of_node; + char *name; + u32 *clkfreq = NULL; + struct ufs_clk_info *clki; + int len = 0; + size_t sz = 0; + + if (!np) + goto out; + + INIT_LIST_HEAD(&hba->clk_list_head); + + cnt = of_property_count_strings(np, "clock-names"); + if (!cnt || (cnt == -EINVAL)) { + dev_info(dev, "%s: Unable to find clocks, assuming enabled\n", + __func__); + } else if (cnt < 0) { + dev_err(dev, "%s: count clock strings failed, err %d\n", + __func__, cnt); + ret = cnt; + } + + if (cnt <= 0) + goto out; + + if (!of_get_property(np, "freq-table-hz", &len)) { + dev_info(dev, "freq-table-hz property not specified\n"); + goto out; + } + + if (len <= 0) + goto out; + + sz = len / sizeof(*clkfreq); + if (sz != 2 * cnt) { + dev_err(dev, "%s len mismatch\n", "freq-table-hz"); + ret = -EINVAL; + goto out; + } + + clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), + GFP_KERNEL); + if (!clkfreq) { + dev_err(dev, "%s: no memory\n", "freq-table-hz"); + ret = -ENOMEM; + goto out; + } + + ret = of_property_read_u32_array(np, "freq-table-hz", + clkfreq, sz); + if (ret && (ret != -EINVAL)) { + dev_err(dev, "%s: error reading array %d\n", + "freq-table-hz", ret); + goto free_clkfreq; + } + + for (i = 0; i < sz; i += 2) { + ret = of_property_read_string_index(np, + "clock-names", i/2, (const char **)&name); + if (ret) + goto free_clkfreq; + + clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); + if (!clki) { + ret = -ENOMEM; + goto free_clkfreq; + } + + clki->min_freq = clkfreq[i]; + clki->max_freq = clkfreq[i+1]; + clki->name = kstrdup(name, GFP_KERNEL); + dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz", + clki->min_freq, clki->max_freq, clki->name); + list_add_tail(&clki->list, &hba->clk_list_head); + } +free_clkfreq: + kfree(clkfreq); +out: + return ret; +} + +#define MAX_PROP_SIZE 32 +static int ufshcd_populate_vreg(struct device *dev, const char *name, + struct ufs_vreg **out_vreg) +{ + int ret = 0; + char prop_name[MAX_PROP_SIZE]; + struct ufs_vreg *vreg = NULL; + struct device_node *np = dev->of_node; + + if (!np) { + dev_err(dev, "%s: non DT initialization\n", __func__); + goto out; + } + + snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); + if (!of_parse_phandle(np, prop_name, 0)) { + dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", + __func__, prop_name); + goto out; + } + + vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); + if (!vreg) { + dev_err(dev, "No memory for %s regulator\n", name); + goto out; + } + + vreg->name = kstrdup(name, GFP_KERNEL); + + /* if fixed regulator no need further initialization */ + snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name); + if (of_property_read_bool(np, prop_name)) + goto out; + + snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); + ret = of_property_read_u32(np, prop_name, &vreg->max_uA); + if (ret) { + dev_err(dev, "%s: unable to find %s err %d\n", + __func__, prop_name, ret); + goto out_free; + } + + vreg->min_uA = 0; + if (!strcmp(name, "vcc")) { + if (of_property_read_bool(np, "vcc-supply-1p8")) { + vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV; + vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV; + } else { + vreg->min_uV = UFS_VREG_VCC_MIN_UV; + vreg->max_uV = UFS_VREG_VCC_MAX_UV; + } + } else if 
(!strcmp(name, "vccq")) { + vreg->min_uV = UFS_VREG_VCCQ_MIN_UV; + vreg->max_uV = UFS_VREG_VCCQ_MAX_UV; + } else if (!strcmp(name, "vccq2")) { + vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV; + vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV; + } + + goto out; + +out_free: + devm_kfree(dev, vreg); + vreg = NULL; +out: + if (!ret) + *out_vreg = vreg; + return ret; +} + +/** + * ufshcd_parse_regulator_info - get regulator info from device tree + * @hba: per adapter instance + * + * Get regulator info from device tree for vcc, vccq, vccq2 power supplies. + * If any of the supplies are not defined it is assumed that they are always-on + * and hence return zero. If the property is defined but parsing is failed + * then return corresponding error. + */ +static int ufshcd_parse_regulator_info(struct ufs_hba *hba) +{ + int err; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vcc", &info->vcc); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vccq", &info->vccq); + if (err) + goto out; + + err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2); +out: + return err; +} + #ifdef CONFIG_PM /** * ufshcd_pltfrm_suspend - suspend power management function * @dev: pointer to device handle * - * - * Returns 0 + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pltfrm_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct ufs_hba *hba = platform_get_drvdata(pdev); - - /* - * TODO: - * 1. Call ufshcd_suspend - * 2. Do bus specific power management - */ - - disable_irq(hba->irq); - - return 0; + return ufshcd_system_suspend(dev_get_drvdata(dev)); } /** * ufshcd_pltfrm_resume - resume power management function * @dev: pointer to device handle * - * Returns 0 + * Returns 0 if successful + * Returns non-zero otherwise */ static int ufshcd_pltfrm_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct ufs_hba *hba = platform_get_drvdata(pdev); - - /* - * TODO: - * 1. Call ufshcd_resume. - * 2. 
Do bus specific wake up - */ - - enable_irq(hba->irq); - - return 0; + return ufshcd_system_resume(dev_get_drvdata(dev)); } #else #define ufshcd_pltfrm_suspend NULL @@ -91,30 +274,15 @@ static int ufshcd_pltfrm_resume(struct device *dev) #ifdef CONFIG_PM_RUNTIME static int ufshcd_pltfrm_runtime_suspend(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_suspend(hba); + return ufshcd_runtime_suspend(dev_get_drvdata(dev)); } static int ufshcd_pltfrm_runtime_resume(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_resume(hba); + return ufshcd_runtime_resume(dev_get_drvdata(dev)); } static int ufshcd_pltfrm_runtime_idle(struct device *dev) { - struct ufs_hba *hba = dev_get_drvdata(dev); - - if (!hba) - return 0; - - return ufshcd_runtime_idle(hba); + return ufshcd_runtime_idle(dev_get_drvdata(dev)); } #else /* !CONFIG_PM_RUNTIME */ #define ufshcd_pltfrm_runtime_suspend NULL @@ -122,6 +290,11 @@ static int ufshcd_pltfrm_runtime_idle(struct device *dev) #define ufshcd_pltfrm_runtime_idle NULL #endif /* CONFIG_PM_RUNTIME */ +static void ufshcd_pltfrm_shutdown(struct platform_device *pdev) +{ + ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); +} + /** * ufshcd_pltfrm_probe - probe routine of the driver * @pdev: pointer to Platform device handle @@ -138,8 +311,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mmio_base = devm_ioremap_resource(dev, mem_res); - if (IS_ERR(mmio_base)) { - err = PTR_ERR(mmio_base); + if (IS_ERR(*(void **)&mmio_base)) { + err = PTR_ERR(*(void **)&mmio_base); goto out; } @@ -150,10 +323,31 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) goto out; } + err = ufshcd_alloc_host(dev, &hba); + if (err) { + dev_err(&pdev->dev, "Allocation failed\n"); + goto out; + } + + hba->vops = get_variant_ops(&pdev->dev); + + err = ufshcd_parse_clock_info(hba); + if (err) { + dev_err(&pdev->dev, "%s: clock parse failed %d\n", + __func__, err); + goto out; + } + err = ufshcd_parse_regulator_info(hba); + if (err) { + dev_err(&pdev->dev, "%s: regulator init failed %d\n", + __func__, err); + goto out; + } + pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - err = ufshcd_init(dev, &hba, mmio_base, irq); + err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Intialization failed\n"); goto out_disable_rpm; @@ -201,6 +395,7 @@ static const struct dev_pm_ops ufshcd_dev_pm_ops = { static struct platform_driver ufshcd_pltfrm_driver = { .probe = ufshcd_pltfrm_probe, .remove = ufshcd_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "ufshcd", .owner = THIS_MODULE, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ba27215b8034..497c38a4a866 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3,6 +3,7 @@ * * This code is based on drivers/scsi/ufs/ufshcd.c * Copyright (C) 2011-2013 Samsung India Software Operations + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * Authors: * Santosh Yaraganavi <santosh.sy@samsung.com> @@ -31,16 +32,19 @@ * circumstances will the contributor of this Program be liable for * any damages of any kind arising from your use or distribution of * this program. + * + * The Linux Foundation chooses to take subject only to the GPLv2 + * license terms, and distributes only under these terms. 
*/ #include <linux/async.h> +#include <linux/devfreq.h> #include "ufshcd.h" #include "unipro.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ - UIC_POWER_MODE |\ UFSHCD_ERROR_MASK) /* UIC command timeout, unit: ms */ #define UIC_CMD_TIMEOUT 500 @@ -58,16 +62,44 @@ /* Task management command timeout */ #define TM_CMD_TIMEOUT 100 /* msecs */ +/* maximum number of link-startup retries */ +#define DME_LINKSTARTUP_RETRIES 3 + +/* maximum number of reset retries before giving up */ +#define MAX_HOST_RESET_RETRIES 5 + /* Expose the flag value from utp_upiu_query.value */ #define MASK_QUERY_UPIU_FLAG_LOC 0xFF /* Interrupt aggregation default timeout, unit: 40us */ #define INT_AGGR_DEF_TO 0x02 +#define ufshcd_toggle_vreg(_dev, _vreg, _on) \ + ({ \ + int _ret; \ + if (_on) \ + _ret = ufshcd_enable_vreg(_dev, _vreg); \ + else \ + _ret = ufshcd_disable_vreg(_dev, _vreg); \ + _ret; \ + }) + +static u32 ufs_query_desc_max_size[] = { + QUERY_DESC_DEVICE_MAX_SIZE, + QUERY_DESC_CONFIGURAION_MAX_SIZE, + QUERY_DESC_UNIT_MAX_SIZE, + QUERY_DESC_RFU_MAX_SIZE, + QUERY_DESC_INTERCONNECT_MAX_SIZE, + QUERY_DESC_STRING_MAX_SIZE, + QUERY_DESC_RFU_MAX_SIZE, + QUERY_DESC_GEOMETRY_MAZ_SIZE, + QUERY_DESC_POWER_MAX_SIZE, + QUERY_DESC_RFU_MAX_SIZE, +}; + enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, - UFSHCD_MAX_LUNS = 8, UFSHCD_CMD_PER_LUN = 32, UFSHCD_CAN_QUEUE = 32, }; @@ -106,12 +138,79 @@ enum { #define ufshcd_clear_eh_in_progress(h) \ (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) +#define ufshcd_set_ufs_dev_active(h) \ + ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) +#define ufshcd_set_ufs_dev_sleep(h) \ + ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) +#define ufshcd_set_ufs_dev_poweroff(h) \ + ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) +#define ufshcd_is_ufs_dev_active(h) \ + ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) +#define ufshcd_is_ufs_dev_sleep(h) \ + ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) +#define ufshcd_is_ufs_dev_poweroff(h) \ + ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) + +static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { + {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, + {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, + {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, +}; + +static inline enum ufs_dev_pwr_mode +ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].dev_state; +} + +static inline enum uic_link_state +ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) +{ + return ufs_pm_lvl_states[lvl].link_state; +} + static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); -static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, - struct scsi_device *sdev); +static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba); +static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, + bool skip_ref_clk); +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); +static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); +static irqreturn_t ufshcd_intr(int irq, void *__hba); +static int ufshcd_config_pwr_mode(struct ufs_hba 
*hba, + struct ufs_pa_layer_attr *desired_pwr_mode); + +static inline int ufshcd_enable_irq(struct ufs_hba *hba) +{ + int ret = 0; + + if (!hba->is_irq_enabled) { + ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, + hba); + if (ret) + dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", + __func__, ret); + hba->is_irq_enabled = true; + } + + return ret; +} + +static inline void ufshcd_disable_irq(struct ufs_hba *hba) +{ + if (hba->is_irq_enabled) { + free_irq(hba->irq, hba); + hba->is_irq_enabled = false; + } +} /* * ufshcd_wait_for_register - wait for register value to change @@ -175,13 +274,14 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) /** * ufshcd_is_device_present - Check if any device connected to * the host controller - * @reg_hcs - host controller status register value + * @hba: pointer to adapter instance * * Returns 1 if device present, 0 if no device detected */ -static inline int ufshcd_is_device_present(u32 reg_hcs) +static inline int ufshcd_is_device_present(struct ufs_hba *hba) { - return (DEVICE_PRESENT & reg_hcs) ? 1 : 0; + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & + DEVICE_PRESENT) ? 1 : 0; } /** @@ -413,6 +513,265 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; } +static void ufshcd_ungate_work(struct work_struct *work) +{ + int ret; + unsigned long flags; + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_gating.ungate_work); + + cancel_delayed_work_sync(&hba->clk_gating.gate_work); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == CLKS_ON) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto unblock_reqs; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_setup_clocks(hba, true); + + /* Exit from hibern8 */ + if (ufshcd_can_hibern8_during_gating(hba)) { + /* Prevent gating in this path */ + hba->clk_gating.is_suspended = true; + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (ret) + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", + __func__, ret); + else + ufshcd_set_link_active(hba); + } + hba->clk_gating.is_suspended = false; + } +unblock_reqs: + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); + scsi_unblock_requests(hba->host); +} + +/** + * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. + * Also, exit from hibern8 mode and set the link as active. + * @hba: per adapter instance + * @async: This indicates whether caller should ungate clocks asynchronously. + */ +int ufshcd_hold(struct ufs_hba *hba, bool async) +{ + int rc = 0; + unsigned long flags; + + if (!ufshcd_is_clkgating_allowed(hba)) + goto out; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.active_reqs++; + +start: + switch (hba->clk_gating.state) { + case CLKS_ON: + break; + case REQ_CLKS_OFF: + if (cancel_delayed_work(&hba->clk_gating.gate_work)) { + hba->clk_gating.state = CLKS_ON; + break; + } + /* + * If we here, it means gating work is either done or + * currently running. Hence, fall through to cancel gating + * work and to enable clocks. + */ + case CLKS_OFF: + scsi_block_requests(hba->host); + hba->clk_gating.state = REQ_CLKS_ON; + schedule_work(&hba->clk_gating.ungate_work); + /* + * fall through to check if we should wait for this + * work to be done or not. 
+ */ + case REQ_CLKS_ON: + if (async) { + rc = -EAGAIN; + hba->clk_gating.active_reqs--; + break; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + flush_work(&hba->clk_gating.ungate_work); + /* Make sure state is CLKS_ON before returning */ + spin_lock_irqsave(hba->host->host_lock, flags); + goto start; + default: + dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", + __func__, hba->clk_gating.state); + break; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +out: + return rc; +} + +static void ufshcd_gate_work(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, struct ufs_hba, + clk_gating.gate_work.work); + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.is_suspended) { + hba->clk_gating.state = CLKS_ON; + goto rel_lock; + } + + if (hba->clk_gating.active_reqs + || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL + || hba->lrb_in_use || hba->outstanding_tasks + || hba->active_uic_cmd || hba->uic_async_done) + goto rel_lock; + + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* put the link into hibern8 mode before turning off clocks */ + if (ufshcd_can_hibern8_during_gating(hba)) { + if (ufshcd_uic_hibern8_enter(hba)) { + hba->clk_gating.state = CLKS_ON; + goto out; + } + ufshcd_set_link_hibern8(hba); + } + + if (ufshcd_is_clkscaling_enabled(hba)) { + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } + + if (!ufshcd_is_link_active(hba)) + ufshcd_setup_clocks(hba, false); + else + /* If link is active, device ref_clk can't be switched off */ + __ufshcd_setup_clocks(hba, false, true); + + /* + * In case you are here to cancel this work the gating state + * would be marked as REQ_CLKS_ON. In this case keep the state + * as REQ_CLKS_ON which would anyway imply that clocks are off + * and a request to turn them on is pending. By doing this way, + * we keep the state machine in tact and this would ultimately + * prevent from doing cancel work multiple times when there are + * new requests arriving before the current cancel work is done. 
+ */ + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == REQ_CLKS_OFF) + hba->clk_gating.state = CLKS_OFF; + +rel_lock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +out: + return; +} + +/* host lock must be held before calling this variant */ +static void __ufshcd_release(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkgating_allowed(hba)) + return; + + hba->clk_gating.active_reqs--; + + if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended + || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL + || hba->lrb_in_use || hba->outstanding_tasks + || hba->active_uic_cmd || hba->uic_async_done) + return; + + hba->clk_gating.state = REQ_CLKS_OFF; + schedule_delayed_work(&hba->clk_gating.gate_work, + msecs_to_jiffies(hba->clk_gating.delay_ms)); +} + +void ufshcd_release(struct ufs_hba *hba) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + __ufshcd_release(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static ssize_t ufshcd_clkgate_delay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms); +} + +static ssize_t ufshcd_clkgate_delay_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned long flags, value; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = value; + spin_unlock_irqrestore(hba->host->host_lock, flags); + return count; +} + +static void ufshcd_init_clk_gating(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkgating_allowed(hba)) + return; + + hba->clk_gating.delay_ms = 150; + INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); + INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); + + hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; + hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; + sysfs_attr_init(&hba->clk_gating.delay_attr.attr); + hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; + hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; + if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) + dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); +} + +static void ufshcd_exit_clk_gating(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkgating_allowed(hba)) + return; + device_remove_file(hba->dev, &hba->clk_gating.delay_attr); +} + +/* Must be called with host lock acquired */ +static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) +{ + if (!ufshcd_is_clkscaling_enabled(hba)) + return; + + if (!hba->clk_scaling.is_busy_started) { + hba->clk_scaling.busy_start_t = ktime_get(); + hba->clk_scaling.is_busy_started = true; + } +} + +static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) +{ + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + + if (!ufshcd_is_clkscaling_enabled(hba)) + return; + + if (!hba->outstanding_reqs && scaling->is_busy_started) { + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + scaling->busy_start_t = ktime_set(0, 0); + scaling->is_busy_started = false; + } +} /** * ufshcd_send_command - Send SCSI or device management commands * @hba: per adapter instance @@ -421,6 +780,7 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { + 
ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); } @@ -576,15 +936,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * @uic_cmd: UIC command * * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called - * with mutex held. + * with mutex held and host_lock locked. * Returns 0 only if success. */ static int __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { - int ret; - unsigned long flags; - if (!ufshcd_ready_for_uic_cmd(hba)) { dev_err(hba->dev, "Controller not ready to accept UIC commands\n"); @@ -593,13 +950,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) init_completion(&uic_cmd->done); - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_dispatch_uic_cmd(hba, uic_cmd); - spin_unlock_irqrestore(hba->host->host_lock, flags); - - ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); - return ret; + return 0; } /** @@ -613,11 +966,19 @@ static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { int ret; + unsigned long flags; + ufshcd_hold(hba, false); mutex_lock(&hba->uic_cmd_mutex); + spin_lock_irqsave(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, uic_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!ret) + ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); + mutex_unlock(&hba->uic_cmd_mutex); + ufshcd_release(hba); return ret; } @@ -867,6 +1228,32 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return ret; } +/* + * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN + * @scsi_lun: scsi LUN id + * + * Returns UPIU LUN id + */ +static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) +{ + if (scsi_is_wlun(scsi_lun)) + return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) + | UFS_UPIU_WLUN_ID; + else + return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; +} + +/** + * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID + * @scsi_lun: UPIU W-LUN id + * + * Returns SCSI W-LUN id + */ +static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) +{ + return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; +} + /** * ufshcd_queuecommand - main entry point for SCSI requests * @cmd: command from SCSI Midlayer @@ -918,6 +1305,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } + err = ufshcd_hold(hba, true); + if (err) { + err = SCSI_MLQUEUE_HOST_BUSY; + clear_bit_unlock(tag, &hba->lrb_in_use); + goto out; + } + WARN_ON(hba->clk_gating.state != CLKS_ON); + lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); @@ -925,7 +1320,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; lrbp->sense_buffer = cmd->sense_buffer; lrbp->task_tag = tag; - lrbp->lun = cmd->device->lun; + lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = false; lrbp->command_type = UTP_CMD_TYPE_SCSI; @@ -1193,6 +1588,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, BUG_ON(!hba); + ufshcd_hold(hba, false); mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); @@ -1236,6 +1632,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); return err; } @@ -1259,6 +1656,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, 
BUG_ON(!hba); + ufshcd_hold(hba, false); if (!attr_val) { dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", __func__, opcode); @@ -1298,6 +1696,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: mutex_unlock(&hba->dev_cmd.lock); out: + ufshcd_release(hba); return err; } @@ -1325,6 +1724,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba, BUG_ON(!hba); + ufshcd_hold(hba, false); if (!desc_buf) { dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", __func__, opcode); @@ -1374,10 +1774,120 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: mutex_unlock(&hba->dev_cmd.lock); out: + ufshcd_release(hba); return err; } /** + * ufshcd_read_desc_param - read the specified descriptor parameter + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_index: descriptor index + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +static int ufshcd_read_desc_param(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u32 param_offset, + u8 *param_read_buf, + u32 param_size) +{ + int ret; + u8 *desc_buf; + u32 buff_len; + bool is_kmalloc = true; + + /* safety checks */ + if (desc_id >= QUERY_DESC_IDN_MAX) + return -EINVAL; + + buff_len = ufs_query_desc_max_size[desc_id]; + if ((param_offset + param_size) > buff_len) + return -EINVAL; + + if (!param_offset && (param_size == buff_len)) { + /* memory space already available to hold full descriptor */ + desc_buf = param_read_buf; + is_kmalloc = false; + } else { + /* allocate memory to hold full descriptor */ + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + } + + ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, desc_buf, + &buff_len); + + if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || + (desc_buf[QUERY_DESC_LENGTH_OFFSET] != + ufs_query_desc_max_size[desc_id]) + || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) { + dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d", + __func__, desc_id, param_offset, buff_len, ret); + if (!ret) + ret = -EINVAL; + + goto out; + } + + if (is_kmalloc) + memcpy(param_read_buf, &desc_buf[param_offset], param_size); +out: + if (is_kmalloc) + kfree(desc_buf); + return ret; +} + +static inline int ufshcd_read_desc(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + u8 *buf, + u32 size) +{ + return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); +} + +static inline int ufshcd_read_power_desc(struct ufs_hba *hba, + u8 *buf, + u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); +} + +/** + * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter + * @hba: Pointer to adapter instance + * @lun: lun id + * @param_offset: offset of the parameter to read + * @param_read_buf: pointer to buffer where parameter would be read + * @param_size: sizeof(param_read_buf) + * + * Return 0 in case of success, non-zero otherwise + */ +static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, + int lun, + enum unit_desc_param param_offset, + u8 *param_read_buf, + u32 param_size) +{ + /* + * Unit descriptors are only available for general purpose LUs (LUN id + * from 0 to 7) and RPMB Well known LU. 
+ */ + if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN)) + return -EOPNOTSUPP; + + return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, + param_offset, param_read_buf, param_size); +} + +/** * ufshcd_memory_alloc - allocate memory for host memory space data structures * @hba: per adapter instance * @@ -1621,44 +2131,54 @@ out: EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); /** - * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage - * using DME_SET primitives. + * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power + * state) and waits for it to take effect. + * * @hba: per adapter instance - * @mode: powr mode value + * @cmd: UIC command to execute + * + * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & + * DME_HIBERNATE_EXIT commands take some time to take its effect on both host + * and device UniPro link and hence it's final completion would be indicated by + * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in + * addition to normal UIC command completion Status (UCCS). This function only + * returns after the relevant status bits indicate the completion. * * Returns 0 on success, non-zero value on failure */ -static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) { - struct uic_command uic_cmd = {0}; - struct completion pwr_done; + struct completion uic_async_done; unsigned long flags; u8 status; int ret; - uic_cmd.command = UIC_CMD_DME_SET; - uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); - uic_cmd.argument3 = mode; - init_completion(&pwr_done); - mutex_lock(&hba->uic_cmd_mutex); + init_completion(&uic_async_done); spin_lock_irqsave(hba->host->host_lock, flags); - hba->pwr_done = &pwr_done; + hba->uic_async_done = &uic_async_done; + ret = __ufshcd_send_uic_cmd(hba, cmd); spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, &uic_cmd); if (ret) { dev_err(hba->dev, - "pwr mode change with mode 0x%x uic error %d\n", - mode, ret); + "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", + cmd->command, cmd->argument3, ret); + goto out; + } + ret = ufshcd_wait_for_uic_cmd(hba, cmd); + if (ret) { + dev_err(hba->dev, + "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", + cmd->command, cmd->argument3, ret); goto out; } - if (!wait_for_completion_timeout(hba->pwr_done, + if (!wait_for_completion_timeout(hba->uic_async_done, msecs_to_jiffies(UIC_CMD_TIMEOUT))) { dev_err(hba->dev, - "pwr mode change with mode 0x%x completion timeout\n", - mode); + "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", + cmd->command, cmd->argument3); ret = -ETIMEDOUT; goto out; } @@ -1666,53 +2186,144 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) status = ufshcd_get_upmcrs(hba); if (status != PWR_LOCAL) { dev_err(hba->dev, - "pwr mode change failed, host umpcrs:0x%x\n", - status); + "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n", + cmd->command, status); ret = (status != PWR_OK) ? status : -1; } out: spin_lock_irqsave(hba->host->host_lock, flags); - hba->pwr_done = NULL; + hba->uic_async_done = NULL; spin_unlock_irqrestore(hba->host->host_lock, flags); mutex_unlock(&hba->uic_cmd_mutex); + return ret; } /** - * ufshcd_config_max_pwr_mode - Set & Change power mode with - * maximum capability attribute information. + * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage + * using DME_SET primitives. 
* @hba: per adapter instance + * @mode: powr mode value * * Returns 0 on success, non-zero value on failure */ -static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) +static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) { - enum {RX = 0, TX = 1}; - u32 lanes[] = {1, 1}; - u32 gear[] = {1, 1}; - u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE}; + struct uic_command uic_cmd = {0}; int ret; + uic_cmd.command = UIC_CMD_DME_SET; + uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); + uic_cmd.argument3 = mode; + ufshcd_hold(hba, false); + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + ufshcd_release(hba); + + return ret; +} + +static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + + uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; + + return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); +} + +static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + if (ret) { + ufshcd_set_link_off(hba); + ret = ufshcd_host_reset_and_restore(hba); + } + + return ret; +} + +/** + * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device + * @hba: per-adapter instance + */ +static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) +{ + struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; + + if (hba->max_pwr_info.is_valid) + return 0; + + pwr_info->pwr_tx = FASTAUTO_MODE; + pwr_info->pwr_rx = FASTAUTO_MODE; + pwr_info->hs_rate = PA_HS_MODE_B; + /* Get the connected lane count */ - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]); - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), + &pwr_info->lane_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &pwr_info->lane_tx); + + if (!pwr_info->lane_rx || !pwr_info->lane_tx) { + dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. * Then, get the maximum gears of PWM speed. 
*/ - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]); - if (!gear[RX]) { - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]); - pwr[RX] = SLOWAUTO_MODE; + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &pwr_info->gear_rx); + if (!pwr_info->gear_rx) { + dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", + __func__, pwr_info->gear_rx); + return -EINVAL; + } + pwr_info->pwr_rx = SLOWAUTO_MODE; } - ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]); - if (!gear[TX]) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), - &gear[TX]); - pwr[TX] = SLOWAUTO_MODE; + &pwr_info->gear_tx); + if (!pwr_info->gear_tx) { + dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", + __func__, pwr_info->gear_tx); + return -EINVAL; + } + pwr_info->pwr_tx = SLOWAUTO_MODE; + } + + hba->max_pwr_info.is_valid = true; + return 0; +} + +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode) +{ + int ret; + + /* if already configured to the requested pwr_mode */ + if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && + pwr_mode->lane_rx == hba->pwr_info.lane_rx && + pwr_mode->lane_tx == hba->pwr_info.lane_tx && + pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && + pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && + pwr_mode->hs_rate == hba->pwr_info.hs_rate) { + dev_dbg(hba->dev, "%s: power already configured\n", __func__); + return 0; } /* @@ -1721,23 +2332,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, * - PA_HSSERIES */ - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]); - if (pwr[RX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), + pwr_mode->lane_rx); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE) ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]); - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]); - if (pwr[TX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), + pwr_mode->lane_tx); + if (pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FAST_MODE) ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); + else + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); - if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE) - ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B); + if (pwr_mode->pwr_rx == FASTAUTO_MODE || + pwr_mode->pwr_tx == FASTAUTO_MODE || + pwr_mode->pwr_rx == FAST_MODE || + pwr_mode->pwr_tx == FAST_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), + pwr_mode->hs_rate); - ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]); - if (ret) + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 + | pwr_mode->pwr_tx); + + if (ret) { dev_err(hba->dev, - "pwr_mode: power mode change failed %d\n", ret); + "%s: power mode change failed %d\n", __func__, ret); + } else { + if (hba->vops && hba->vops->pwr_change_notify) + 
hba->vops->pwr_change_notify(hba, + POST_CHANGE, NULL, pwr_mode); + + memcpy(&hba->pwr_info, pwr_mode, + sizeof(struct ufs_pa_layer_attr)); + } + + return ret; +} + +/** + * ufshcd_config_pwr_mode - configure a new power mode + * @hba: per-adapter instance + * @desired_pwr_mode: desired power configuration + */ +static int ufshcd_config_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *desired_pwr_mode) +{ + struct ufs_pa_layer_attr final_params = { 0 }; + int ret; + + if (hba->vops && hba->vops->pwr_change_notify) + hba->vops->pwr_change_notify(hba, + PRE_CHANGE, desired_pwr_mode, &final_params); + else + memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); + + ret = ufshcd_change_power_mode(hba, &final_params); return ret; } @@ -1798,11 +2453,10 @@ out: * @hba: per adapter instance * * To bring UFS host controller to operational state, - * 1. Check if device is present - * 2. Enable required interrupts - * 3. Configure interrupt aggregation - * 4. Program UTRL and UTMRL base addres - * 5. Configure run-stop-registers + * 1. Enable required interrupts + * 2. Configure interrupt aggregation + * 3. Program UTRL and UTMRL base addres + * 4. Configure run-stop-registers * * Returns 0 on success, non-zero value on failure */ @@ -1811,14 +2465,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) int err = 0; u32 reg; - /* check if device present */ - reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); - if (!ufshcd_is_device_present(reg)) { - dev_err(hba->dev, "cc: Device not present\n"); - err = -ENXIO; - goto out; - } - /* Enable required interrupts */ ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); @@ -1839,6 +2485,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) * UCRDY, UTMRLDY and UTRLRDY bits must be 1 * DEI, HEI bits must be 0 */ + reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); if (!(ufshcd_get_lists_status(reg))) { ufshcd_enable_run_stop_reg(hba); } else { @@ -1885,6 +2532,12 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) msleep(5); } + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); + + if (hba->vops && hba->vops->hce_enable_notify) + hba->vops->hce_enable_notify(hba, PRE_CHANGE); + /* start controller initialization sequence */ ufshcd_hba_start(hba); @@ -1912,6 +2565,13 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) } msleep(5); } + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + + if (hba->vops && hba->vops->hce_enable_notify) + hba->vops->hce_enable_notify(hba, POST_CHANGE); + return 0; } @@ -1924,16 +2584,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) static int ufshcd_link_startup(struct ufs_hba *hba) { int ret; + int retries = DME_LINKSTARTUP_RETRIES; - /* enable UIC related interrupts */ - ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + do { + if (hba->vops && hba->vops->link_startup_notify) + hba->vops->link_startup_notify(hba, PRE_CHANGE); + + ret = ufshcd_dme_link_startup(hba); + + /* check if device is detected by inter-connect layer */ + if (!ret && !ufshcd_is_device_present(hba)) { + dev_err(hba->dev, "%s: Device not present\n", __func__); + ret = -ENXIO; + goto out; + } + + /* + * DME link lost indication is only received when link is up, + * but we can't be sure if the link is up until link startup + * succeeds. So reset the local Uni-Pro and try again. + */ + if (ret && ufshcd_hba_enable(hba)) + goto out; + } while (ret && retries--); - ret = ufshcd_dme_link_startup(hba); if (ret) + /* failed to get the link up... 
retire */ goto out; - ret = ufshcd_make_hba_operational(hba); + /* Include any host controller configuration via UIC commands */ + if (hba->vops && hba->vops->link_startup_notify) { + ret = hba->vops->link_startup_notify(hba, POST_CHANGE); + if (ret) + goto out; + } + ret = ufshcd_make_hba_operational(hba); out: if (ret) dev_err(hba->dev, "link startup failed %d\n", ret); @@ -1955,6 +2641,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) int err = 0; int retries; + ufshcd_hold(hba, false); mutex_lock(&hba->dev_cmd.lock); for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, @@ -1966,6 +2653,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); } mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); if (err) dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); @@ -1973,6 +2661,100 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) } /** + * ufshcd_set_queue_depth - set lun queue depth + * @sdev: pointer to SCSI device + * + * Read bLUQueueDepth value and activate scsi tagged command + * queueing. For WLUN, queue depth is set to 1. For best-effort + * cases (bLUQueueDepth = 0) the queue depth is set to a maximum + * value that host can queue. + */ +static void ufshcd_set_queue_depth(struct scsi_device *sdev) +{ + int ret = 0; + u8 lun_qdepth; + struct ufs_hba *hba; + + hba = shost_priv(sdev->host); + + lun_qdepth = hba->nutrs; + ret = ufshcd_read_unit_desc_param(hba, + ufshcd_scsi_to_upiu_lun(sdev->lun), + UNIT_DESC_PARAM_LU_Q_DEPTH, + &lun_qdepth, + sizeof(lun_qdepth)); + + /* Some WLUN doesn't support unit descriptor */ + if (ret == -EOPNOTSUPP) + lun_qdepth = 1; + else if (!lun_qdepth) + /* eventually, we can figure out the real queue depth */ + lun_qdepth = hba->nutrs; + else + lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); + + dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", + __func__, lun_qdepth); + scsi_activate_tcq(sdev, lun_qdepth); +} + +/* + * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR + * @hba: per-adapter instance + * @lun: UFS device lun id + * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info + * + * Returns 0 in case of success and b_lu_write_protect status would be returned + * @b_lu_write_protect parameter. + * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. + * Returns -EINVAL in case of invalid parameters passed to this function. + */ +static int ufshcd_get_lu_wp(struct ufs_hba *hba, + u8 lun, + u8 *b_lu_write_protect) +{ + int ret; + + if (!b_lu_write_protect) + ret = -EINVAL; + /* + * According to UFS device spec, RPMB LU can't be write + * protected so skip reading bLUWriteProtect parameter for + * it. For other W-LUs, UNIT DESCRIPTOR is not available. 
+ */ + else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + ret = -ENOTSUPP; + else + ret = ufshcd_read_unit_desc_param(hba, + lun, + UNIT_DESC_PARAM_LU_WR_PROTECT, + b_lu_write_protect, + sizeof(*b_lu_write_protect)); + return ret; +} + +/** + * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect + * status + * @hba: per-adapter instance + * @sdev: pointer to SCSI device + * + */ +static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, + struct scsi_device *sdev) +{ + if (hba->dev_info.f_power_on_wp_en && + !hba->dev_info.is_lu_power_on_wp) { + u8 b_lu_write_protect; + + if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), + &b_lu_write_protect) && + (b_lu_write_protect == UFS_LU_POWER_ON_WP)) + hba->dev_info.is_lu_power_on_wp = true; + } +} + +/** * ufshcd_slave_alloc - handle initial SCSI device configurations * @sdev: pointer to SCSI device * @@ -1981,7 +2763,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) static int ufshcd_slave_alloc(struct scsi_device *sdev) { struct ufs_hba *hba; - int lun_qdepth; hba = shost_priv(sdev->host); sdev->tagged_supported = 1; @@ -1996,16 +2777,10 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* REPORT SUPPORTED OPERATION CODES is not supported */ sdev->no_report_opcodes = 1; - lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev); - if (lun_qdepth <= 0) - /* eventually, we can figure out the real queue depth */ - lun_qdepth = hba->nutrs; - else - lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); - dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", - __func__, lun_qdepth); - scsi_activate_tcq(sdev, lun_qdepth); + ufshcd_set_queue_depth(sdev); + + ufshcd_get_lu_power_on_wp_status(hba, sdev); return 0; } @@ -2068,6 +2843,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) hba = shost_priv(sdev->host); scsi_deactivate_tcq(sdev, hba->nutrs); + /* Drop the reference as it won't be needed anymore */ + if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) + hba->sdev_ufs_device = NULL; } /** @@ -2234,8 +3012,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) complete(&hba->active_uic_cmd->done); } - if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) - complete(hba->pwr_done); + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) + complete(hba->uic_async_done); } /** @@ -2275,6 +3053,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) clear_bit_unlock(index, &hba->lrb_in_use); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); + __ufshcd_release(hba); } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { if (hba->dev_cmd.complete) complete(hba->dev_cmd.complete); @@ -2284,6 +3063,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) /* clear corresponding bits of completed commands */ hba->outstanding_reqs ^= completed_reqs; + ufshcd_clk_scaling_update_busy(hba); + /* we might have free'd some tags above */ wake_up(&hba->dev_cmd.tag_wq); } @@ -2447,33 +3228,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) } /** - * ufshcd_urgent_bkops - handle urgent bkops exception event + * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status * @hba: per-adapter instance + * @status: bkops_status value * - * Enable fBackgroundOpsEn flag in the device to permit background - * operations. 
+ * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
+ * flag in the device to permit background operations if the device
+ * bkops_status is greater than or equal to the "status" argument passed to
+ * this function, disable it otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
 */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+		enum bkops_status status)
 {
 	int err;
-	u32 status = 0;
+	u32 curr_status = 0;
 
-	err = ufshcd_get_bkops_status(hba, &status);
+	err = ufshcd_get_bkops_status(hba, &curr_status);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
 				__func__, err);
 		goto out;
+	} else if (curr_status > BKOPS_STATUS_MAX) {
+		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+				__func__, curr_status);
+		err = -EINVAL;
+		goto out;
 	}
 
-	status = status & 0xF;
-
-	/* handle only if status indicates performance impact or critical */
-	if (status >= BKOPS_STATUS_PERF_IMPACT)
+	if (curr_status >= status)
 		err = ufshcd_enable_auto_bkops(hba);
+	else
+		err = ufshcd_disable_auto_bkops(hba);
 out:
 	return err;
 }
 
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ *
+ * If BKOPs is enabled, this function returns 0; it returns 1 if bkops is not
+ * enabled, and a negative error value for any other failure.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+}
+
 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 {
 	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -2505,7 +3315,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 	status &= hba->ee_ctrl_mask;
 	if (status & MASK_EE_URGENT_BKOPS) {
 		err = ufshcd_urgent_bkops(hba);
-		if (err)
+		if (err < 0)
 			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 					__func__, err);
 	}
@@ -2530,6 +3340,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	hba = container_of(work, struct ufs_hba, eh_work);
 
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -2583,6 +3394,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 out:
 	scsi_unblock_requests(hba->host);
+	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
 }
 
@@ -2766,6 +3578,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
@@ -2785,7 +3598,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 			lun_id, task_tag);
 	task_req_upiup->header.dword_1 =
 		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
-
+	/*
+	 * The host shall provide the same value for LUN field in the basic
+	 * header and for Input Parameter.
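+	 * (dword_0 above already carries lun_id; input_param1 below is set
+	 * to the same value.)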
+ */ task_req_upiup->input_param1 = cpu_to_be32(lun_id); task_req_upiup->input_param2 = cpu_to_be32(task_id); @@ -2814,6 +3630,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, ufshcd_put_tm_slot(hba, free_slot); wake_up(&hba->tm_tag_wq); + ufshcd_release(hba); return err; } @@ -2896,6 +3713,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) hba = shost_priv(host); tag = cmd->request->tag; + ufshcd_hold(hba, false); /* If command is already aborted/completed, return SUCCESS */ if (!(test_bit(tag, &hba->outstanding_reqs))) goto out; @@ -2960,6 +3778,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) clear_bit_unlock(tag, &hba->lrb_in_use); wake_up(&hba->dev_cmd.tag_wq); + out: if (!err) { err = SUCCESS; @@ -2968,6 +3787,11 @@ out: err = FAILED; } + /* + * This ufshcd_release() corresponds to the original scsi cmd that got + * aborted here (as we won't get any IRQ for it). + */ + ufshcd_release(hba); return err; } @@ -2984,7 +3808,6 @@ out: static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) { int err; - async_cookie_t cookie; unsigned long flags; /* Reset the host controller */ @@ -2997,10 +3820,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; /* Establish the link again and restore the device */ - cookie = async_schedule(ufshcd_async_scan, hba); - /* wait for async scan to be completed */ - async_synchronize_cookie(++cookie); - if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + err = ufshcd_probe_hba(hba); + + if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) err = -EIO; out: if (err) @@ -3022,8 +3844,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba) { int err = 0; unsigned long flags; + int retries = MAX_HOST_RESET_RETRIES; - err = ufshcd_host_reset_and_restore(hba); + do { + err = ufshcd_host_reset_and_restore(hba); + } while (err && --retries); /* * After reset the door-bell might be cleared, complete @@ -3051,6 +3876,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) hba = shost_priv(cmd->device->host); + ufshcd_hold(hba, false); /* * Check if there is any race with fatal error handling. * If so, wait for it to complete. Even though fatal error @@ -3084,56 +3910,232 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) ufshcd_clear_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_release(hba); return err; } /** - * ufshcd_read_sdev_qdepth - read the lun command queue depth - * @hba: Pointer to adapter instance - * @sdev: pointer to SCSI device + * ufshcd_get_max_icc_level - calculate the ICC level + * @sup_curr_uA: max. current supported by the regulator + * @start_scan: row at the desc table to start scan from + * @buff: power descriptor buffer * - * Return in case of success the lun's queue depth else error. 
+ * Returns calculated max ICC level for specific regulator */ -static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, - struct scsi_device *sdev) +static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) +{ + int i; + int curr_uA; + u16 data; + u16 unit; + + for (i = start_scan; i >= 0; i--) { + data = be16_to_cpu(*((u16 *)(buff + 2*i))); + unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> + ATTR_ICC_LVL_UNIT_OFFSET; + curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; + switch (unit) { + case UFSHCD_NANO_AMP: + curr_uA = curr_uA / 1000; + break; + case UFSHCD_MILI_AMP: + curr_uA = curr_uA * 1000; + break; + case UFSHCD_AMP: + curr_uA = curr_uA * 1000 * 1000; + break; + case UFSHCD_MICRO_AMP: + default: + break; + } + if (sup_curr_uA >= curr_uA) + break; + } + if (i < 0) { + i = 0; + pr_err("%s: Couldn't find valid icc_level = %d", __func__, i); + } + + return (u32)i; +} + +/** + * ufshcd_calc_icc_level - calculate the max ICC level + * In case regulators are not initialized we'll return 0 + * @hba: per-adapter instance + * @desc_buf: power descriptor buffer to extract ICC levels from. + * @len: length of desc_buff + * + * Returns calculated ICC level + */ +static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, + u8 *desc_buf, int len) +{ + u32 icc_level = 0; + + if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || + !hba->vreg_info.vccq2) { + dev_err(hba->dev, + "%s: Regulator capability was not set, actvIccLevel=%d", + __func__, icc_level); + goto out; + } + + if (hba->vreg_info.vcc) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vcc->max_uA, + POWER_DESC_MAX_ACTV_ICC_LVLS - 1, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); + + if (hba->vreg_info.vccq) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); + + if (hba->vreg_info.vccq2) + icc_level = ufshcd_get_max_icc_level( + hba->vreg_info.vccq2->max_uA, + icc_level, + &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); +out: + return icc_level; +} + +static void ufshcd_init_icc_levels(struct ufs_hba *hba) { int ret; - int buff_len = UNIT_DESC_MAX_SIZE; - u8 desc_buf[UNIT_DESC_MAX_SIZE]; + int buff_len = QUERY_DESC_POWER_MAX_SIZE; + u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; - ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, - QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len); + ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); + if (ret) { + dev_err(hba->dev, + "%s: Failed reading power descriptor.len = %d ret = %d", + __func__, buff_len, ret); + return; + } + + hba->init_prefetch_data.icc_level = + ufshcd_find_max_sup_active_icc_level(hba, + desc_buf, buff_len); + dev_dbg(hba->dev, "%s: setting icc_level 0x%x", + __func__, hba->init_prefetch_data.icc_level); - if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) { + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, + &hba->init_prefetch_data.icc_level); + + if (ret) dev_err(hba->dev, - "%s:Failed reading unit descriptor. 
len = %d ret = %d" - , __func__, buff_len, ret); - if (!ret) - ret = -EINVAL; + "%s: Failed configuring bActiveICCLevel = %d ret = %d", + __func__, hba->init_prefetch_data.icc_level , ret); +} + +/** + * ufshcd_scsi_add_wlus - Adds required W-LUs + * @hba: per-adapter instance + * + * UFS device specification requires the UFS devices to support 4 well known + * logical units: + * "REPORT_LUNS" (address: 01h) + * "UFS Device" (address: 50h) + * "RPMB" (address: 44h) + * "BOOT" (address: 30h) + * UFS device's power management needs to be controlled by "POWER CONDITION" + * field of SSU (START STOP UNIT) command. But this "power condition" field + * will take effect only when its sent to "UFS device" well known logical unit + * hence we require the scsi_device instance to represent this logical unit in + * order for the UFS host driver to send the SSU command for power management. + + * We also require the scsi_device instance for "RPMB" (Replay Protected Memory + * Block) LU so user space process can control this LU. User space may also + * want to have access to BOOT LU. + + * This function adds scsi device instances for each of all well known LUs + * (except "REPORT LUNS" LU). + * + * Returns zero on success (all required W-LUs are added successfully), + * non-zero error value on failure (if failed to add any of the required W-LU). + */ +static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) +{ + int ret = 0; + + hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); + if (IS_ERR(hba->sdev_ufs_device)) { + ret = PTR_ERR(hba->sdev_ufs_device); + hba->sdev_ufs_device = NULL; goto out; } - ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF; + hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); + if (IS_ERR(hba->sdev_boot)) { + ret = PTR_ERR(hba->sdev_boot); + hba->sdev_boot = NULL; + goto remove_sdev_ufs_device; + } + + hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); + if (IS_ERR(hba->sdev_rpmb)) { + ret = PTR_ERR(hba->sdev_rpmb); + hba->sdev_rpmb = NULL; + goto remove_sdev_boot; + } + goto out; + +remove_sdev_boot: + scsi_remove_device(hba->sdev_boot); +remove_sdev_ufs_device: + scsi_remove_device(hba->sdev_ufs_device); out: return ret; } /** - * ufshcd_async_scan - asynchronous execution for link startup - * @data: data pointer to pass to this function - * @cookie: cookie data + * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by + * ufshcd_scsi_add_wlus() + * @hba: per-adapter instance + * */ -static void ufshcd_async_scan(void *data, async_cookie_t cookie) +static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba) +{ + if (hba->sdev_ufs_device) { + scsi_remove_device(hba->sdev_ufs_device); + hba->sdev_ufs_device = NULL; + } + + if (hba->sdev_boot) { + scsi_remove_device(hba->sdev_boot); + hba->sdev_boot = NULL; + } + + if (hba->sdev_rpmb) { + scsi_remove_device(hba->sdev_rpmb); + hba->sdev_rpmb = NULL; + } +} + +/** + * ufshcd_probe_hba - probe hba to detect device and initialize + * @hba: per-adapter instance + * + * Execute link-startup and verify device initialization + */ +static int ufshcd_probe_hba(struct ufs_hba *hba) { - struct ufs_hba *hba = (struct ufs_hba *)data; int ret; ret = ufshcd_link_startup(hba); if (ret) goto out; - ufshcd_config_max_pwr_mode(hba); + /* UniPro link is active now */ + ufshcd_set_link_active(hba); ret = ufshcd_verify_dev_init(hba); if (ret) @@ -3143,16 
+4145,77 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) if (ret) goto out; + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + hba->wlun_dev_clr_ua = true; + + if (ufshcd_get_max_pwr_mode(hba)) { + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); + } else { + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + } + + /* + * If we are in error handling context or in power management callbacks + * context, no need to scan the host + */ + if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { + bool flag; + + /* clear any previous UFS device information */ + memset(&hba->dev_info, 0, sizeof(hba->dev_info)); + if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + hba->dev_info.f_power_on_wp_en = flag; + + if (!hba->is_init_prefetch) + ufshcd_init_icc_levels(hba); + + /* Add required well known logical units to scsi mid layer */ + if (ufshcd_scsi_add_wlus(hba)) + goto out; - /* If we are in error handling context no need to scan the host */ - if (!ufshcd_eh_in_progress(hba)) { scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); } + + if (!hba->is_init_prefetch) + hba->is_init_prefetch = true; + + /* Resume devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); + out: - return; + /* + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. + */ + if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { + pm_runtime_put_sync(hba->dev); + ufshcd_hba_exit(hba); + } + + return ret; +} + +/** + * ufshcd_async_scan - asynchronous execution for probing hba + * @data: data pointer to pass to this function + * @cookie: cookie data + */ +static void ufshcd_async_scan(void *data, async_cookie_t cookie) +{ + struct ufs_hba *hba = (struct ufs_hba *)data; + + ufshcd_probe_hba(hba); } static struct scsi_host_template ufshcd_driver_template = { @@ -3171,70 +4234,956 @@ static struct scsi_host_template ufshcd_driver_template = { .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, .can_queue = UFSHCD_CAN_QUEUE, + .max_host_blocked = 1, }; +static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, + int ua) +{ + int ret = 0; + struct regulator *reg = vreg->reg; + const char *name = vreg->name; + + BUG_ON(!vreg); + + ret = regulator_set_optimum_mode(reg, ua); + if (ret >= 0) { + /* + * regulator_set_optimum_mode() returns new regulator + * mode upon success. + */ + ret = 0; + } else { + dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n", + __func__, name, ua, ret); + } + + return ret; +} + +static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); +} + +static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, + struct ufs_vreg *vreg) +{ + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); +} + +static int ufshcd_config_vreg(struct device *dev, + struct ufs_vreg *vreg, bool on) +{ + int ret = 0; + struct regulator *reg = vreg->reg; + const char *name = vreg->name; + int min_uV, uA_load; + + BUG_ON(!vreg); + + if (regulator_count_voltages(reg) > 0) { + min_uV = on ? 
vreg->min_uV : 0; + ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); + if (ret) { + dev_err(dev, "%s: %s set voltage failed, err=%d\n", + __func__, name, ret); + goto out; + } + + uA_load = on ? vreg->max_uA : 0; + ret = ufshcd_config_vreg_load(dev, vreg, uA_load); + if (ret) + goto out; + } +out: + return ret; +} + +static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || vreg->enabled) + goto out; + + ret = ufshcd_config_vreg(dev, vreg, true); + if (!ret) + ret = regulator_enable(vreg->reg); + + if (!ret) + vreg->enabled = true; + else + dev_err(dev, "%s: %s enable failed, err=%d\n", + __func__, vreg->name, ret); +out: + return ret; +} + +static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg || !vreg->enabled) + goto out; + + ret = regulator_disable(vreg->reg); + + if (!ret) { + /* ignore errors on applying disable config */ + ufshcd_config_vreg(dev, vreg, false); + vreg->enabled = false; + } else { + dev_err(dev, "%s: %s disable failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + if (!info) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vcc, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq, on); + if (ret) + goto out; + + ret = ufshcd_toggle_vreg(dev, info->vccq2, on); + if (ret) + goto out; + +out: + if (ret) { + ufshcd_toggle_vreg(dev, info->vccq2, false); + ufshcd_toggle_vreg(dev, info->vccq, false); + ufshcd_toggle_vreg(dev, info->vcc, false); + } + return ret; +} + +static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + if (info) + return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); + + return 0; +} + +static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) +{ + int ret = 0; + + if (!vreg) + goto out; + + vreg->reg = devm_regulator_get(dev, vreg->name); + if (IS_ERR(vreg->reg)) { + ret = PTR_ERR(vreg->reg); + dev_err(dev, "%s: %s get failed, err=%d\n", + __func__, vreg->name, ret); + } +out: + return ret; +} + +static int ufshcd_init_vreg(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_vreg_info *info = &hba->vreg_info; + + if (!info) + goto out; + + ret = ufshcd_get_vreg(dev, info->vcc); + if (ret) + goto out; + + ret = ufshcd_get_vreg(dev, info->vccq); + if (ret) + goto out; + + ret = ufshcd_get_vreg(dev, info->vccq2); +out: + return ret; +} + +static int ufshcd_init_hba_vreg(struct ufs_hba *hba) +{ + struct ufs_vreg_info *info = &hba->vreg_info; + + if (info) + return ufshcd_get_vreg(hba->dev, info->vdd_hba); + + return 0; +} + +static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, + bool skip_ref_clk) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + unsigned long flags; + + if (!head || list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) + continue; + + if (on && !clki->enabled) { + ret = clk_prepare_enable(clki->clk); + if (ret) { + dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", + __func__, clki->name, ret); + goto out; + } + } else if (!on && clki->enabled) { + clk_disable_unprepare(clki->clk); + } + clki->enabled = on; + dev_dbg(hba->dev, "%s: clk: %s %sabled\n", 
__func__, + clki->name, on ? "en" : "dis"); + } + } + + if (hba->vops && hba->vops->setup_clocks) + ret = hba->vops->setup_clocks(hba, on); +out: + if (ret) { + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) + clk_disable_unprepare(clki->clk); + } + } else if (!ret && on) { + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.state = CLKS_ON; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } + return ret; +} + +static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) +{ + return __ufshcd_setup_clocks(hba, on, false); +} + +static int ufshcd_init_clocks(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct device *dev = hba->dev; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!clki->name) + continue; + + clki->clk = devm_clk_get(dev, clki->name); + if (IS_ERR(clki->clk)) { + ret = PTR_ERR(clki->clk); + dev_err(dev, "%s: %s clk get failed, %d\n", + __func__, clki->name, ret); + goto out; + } + + if (clki->max_freq) { + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + goto out; + } + clki->curr_freq = clki->max_freq; + } + dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } +out: + return ret; +} + +static int ufshcd_variant_hba_init(struct ufs_hba *hba) +{ + int err = 0; + + if (!hba->vops) + goto out; + + if (hba->vops->init) { + err = hba->vops->init(hba); + if (err) + goto out; + } + + if (hba->vops->setup_regulators) { + err = hba->vops->setup_regulators(hba, true); + if (err) + goto out_exit; + } + + goto out; + +out_exit: + if (hba->vops->exit) + hba->vops->exit(hba); +out: + if (err) + dev_err(hba->dev, "%s: variant %s init failed err %d\n", + __func__, hba->vops ? hba->vops->name : "", err); + return err; +} + +static void ufshcd_variant_hba_exit(struct ufs_hba *hba) +{ + if (!hba->vops) + return; + + if (hba->vops->setup_clocks) + hba->vops->setup_clocks(hba, false); + + if (hba->vops->setup_regulators) + hba->vops->setup_regulators(hba, false); + + if (hba->vops->exit) + hba->vops->exit(hba); +} + +static int ufshcd_hba_init(struct ufs_hba *hba) +{ + int err; + + /* + * Handle host controller power separately from the UFS device power + * rails as it will help controlling the UFS host controller power + * collapse easily which is different than UFS device power collapse. + * Also, enable the host controller power before we go ahead with rest + * of the initialization here. 
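+	 * ufshcd_hba_exit() below releases these resources in the reverse
+	 * order of this bring-up sequence.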
+ */ + err = ufshcd_init_hba_vreg(hba); + if (err) + goto out; + + err = ufshcd_setup_hba_vreg(hba, true); + if (err) + goto out; + + err = ufshcd_init_clocks(hba); + if (err) + goto out_disable_hba_vreg; + + err = ufshcd_setup_clocks(hba, true); + if (err) + goto out_disable_hba_vreg; + + err = ufshcd_init_vreg(hba); + if (err) + goto out_disable_clks; + + err = ufshcd_setup_vreg(hba, true); + if (err) + goto out_disable_clks; + + err = ufshcd_variant_hba_init(hba); + if (err) + goto out_disable_vreg; + + hba->is_powered = true; + goto out; + +out_disable_vreg: + ufshcd_setup_vreg(hba, false); +out_disable_clks: + ufshcd_setup_clocks(hba, false); +out_disable_hba_vreg: + ufshcd_setup_hba_vreg(hba, false); +out: + return err; +} + +static void ufshcd_hba_exit(struct ufs_hba *hba) +{ + if (hba->is_powered) { + ufshcd_variant_hba_exit(hba); + ufshcd_setup_vreg(hba, false); + ufshcd_setup_clocks(hba, false); + ufshcd_setup_hba_vreg(hba, false); + hba->is_powered = false; + } +} + +static int +ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) +{ + unsigned char cmd[6] = {REQUEST_SENSE, + 0, + 0, + 0, + SCSI_SENSE_BUFFERSIZE, + 0}; + char *buffer; + int ret; + + buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + goto out; + } + + ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, + SCSI_SENSE_BUFFERSIZE, NULL, + msecs_to_jiffies(1000), 3, NULL, REQ_PM); + if (ret) + pr_err("%s: failed with err %d\n", __func__, ret); + + kfree(buffer); +out: + return ret; +} + +/** + * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device + * power mode + * @hba: per adapter instance + * @pwr_mode: device power mode to set + * + * Returns 0 if requested power mode is set successfully + * Returns non-zero if failed to set the requested power mode + */ +static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, + enum ufs_dev_pwr_mode pwr_mode) +{ + unsigned char cmd[6] = { START_STOP }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdp = hba->sdev_ufs_device; + int ret; + + if (!sdp || !scsi_device_online(sdp)) + return -ENODEV; + + /* + * If scsi commands fail, the scsi mid-layer schedules scsi error- + * handling, which would wait for host to be resumed. Since we know + * we are functional while we are here, skip host resume in error + * handling context. + */ + hba->host->eh_noresume = 1; + if (hba->wlun_dev_clr_ua) { + ret = ufshcd_send_request_sense(hba, sdp); + if (ret) + goto out; + /* Unit attention condition is cleared now */ + hba->wlun_dev_clr_ua = false; + } + + cmd[4] = pwr_mode << 4; + + /* + * Current function would be generally called from the power management + * callbacks hence set the REQ_PM flag so that it doesn't resume the + * already suspended childs. 
+ */ + ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + START_STOP_TIMEOUT, 0, NULL, REQ_PM); + if (ret) { + sdev_printk(KERN_WARNING, sdp, + "START_STOP failed for power mode: %d\n", pwr_mode); + scsi_show_result(ret); + if (driver_byte(ret) & DRIVER_SENSE) { + scsi_show_sense_hdr(&sshdr); + scsi_show_extd_sense(sshdr.asc, sshdr.ascq); + } + } + + if (!ret) + hba->curr_dev_pwr_mode = pwr_mode; +out: + hba->host->eh_noresume = 0; + return ret; +} + +static int ufshcd_link_state_transition(struct ufs_hba *hba, + enum uic_link_state req_link_state, + int check_for_bkops) +{ + int ret = 0; + + if (req_link_state == hba->uic_link_state) + return 0; + + if (req_link_state == UIC_LINK_HIBERN8_STATE) { + ret = ufshcd_uic_hibern8_enter(hba); + if (!ret) + ufshcd_set_link_hibern8(hba); + else + goto out; + } + /* + * If autobkops is enabled, link can't be turned off because + * turning off the link would also turn off the device. + */ + else if ((req_link_state == UIC_LINK_OFF_STATE) && + (!check_for_bkops || (check_for_bkops && + !hba->auto_bkops_enabled))) { + /* + * Change controller state to "reset state" which + * should also put the link in off/reset state + */ + ufshcd_hba_stop(hba); + /* + * TODO: Check if we need any delay to make sure that + * controller is reset + */ + ufshcd_set_link_off(hba); + } + +out: + return ret; +} + +static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) +{ + /* + * If UFS device is either in UFS_Sleep turn off VCC rail to save some + * power. + * + * If UFS device and link is in OFF state, all power supplies (VCC, + * VCCQ, VCCQ2) can be turned off if power on write protect is not + * required. If UFS link is inactive (Hibern8 or OFF state) and device + * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. + * + * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway + * in low power state which would save some power. 
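+	 * ufshcd_vreg_set_hpm() undoes each of these cases again on the
+	 * resume path.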
+ */ + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ufshcd_setup_vreg(hba, false); + } else if (!ufshcd_is_ufs_dev_active(hba)) { + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); + if (!ufshcd_is_link_active(hba)) { + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); + } + } +} + +static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) +{ + int ret = 0; + + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && + !hba->dev_info.is_lu_power_on_wp) { + ret = ufshcd_setup_vreg(hba, true); + } else if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); + if (!ret && !ufshcd_is_link_active(hba)) { + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); + if (ret) + goto vcc_disable; + ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); + if (ret) + goto vccq_lpm; + } + } + goto out; + +vccq_lpm: + ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); +vcc_disable: + ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); +out: + return ret; +} + +static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba)) + ufshcd_setup_hba_vreg(hba, false); +} + +static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) +{ + if (ufshcd_is_link_off(hba)) + ufshcd_setup_hba_vreg(hba, true); +} + /** - * ufshcd_suspend - suspend power management function + * ufshcd_suspend - helper function for suspend operations * @hba: per adapter instance - * @state: power state + * @pm_op: desired low power operation type + * + * This function will try to put the UFS device and link into low power + * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" + * (System PM level). + * + * If this function is called during shutdown, it will make sure that + * both UFS device and UFS link is powered off. * - * Returns -ENOSYS + * NOTE: UFS device & link must be active before we enter in this function. + * + * Returns 0 for success and non-zero for failure */ -int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) +static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int ret = 0; + enum ufs_pm_level pm_lvl; + enum ufs_dev_pwr_mode req_dev_pwr_mode; + enum uic_link_state req_link_state; + + hba->pm_op_in_progress = 1; + if (!ufshcd_is_shutdown_pm(pm_op)) { + pm_lvl = ufshcd_is_runtime_pm(pm_op) ? + hba->rpm_lvl : hba->spm_lvl; + req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); + req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); + } else { + req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; + req_link_state = UIC_LINK_OFF_STATE; + } + /* - * TODO: - * 1. Block SCSI requests from SCSI midlayer - * 2. Change the internal driver state to non operational - * 3. Set UTRLRSR and UTMRLRSR bits to zero - * 4. Wait until outstanding commands are completed - * 5. Set HCE to zero to send the UFS host controller to reset state + * If we can't transition into any of the low power modes + * just gate the clocks. 
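+	 * (the UFS_ACTIVE_PWR_MODE / UIC_LINK_ACTIVE_STATE case below jumps
+	 * straight to the disable_clks label.)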
*/ + ufshcd_hold(hba, false); + hba->clk_gating.is_suspended = true; + + if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && + req_link_state == UIC_LINK_ACTIVE_STATE) { + goto disable_clks; + } + + if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && + (req_link_state == hba->uic_link_state)) + goto out; + + /* UFS device & link must be active before we enter in this function */ + if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { + ret = -EINVAL; + goto out; + } - return -ENOSYS; + if (ufshcd_is_runtime_pm(pm_op)) { + if (ufshcd_can_autobkops_during_suspend(hba)) { + /* + * The device is idle with no requests in the queue, + * allow background operations if bkops status shows + * that performance might be impacted. + */ + ret = ufshcd_urgent_bkops(hba); + if (ret) + goto enable_gating; + } else { + /* make sure that auto bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + } + } + + if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && + ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || + !ufshcd_is_runtime_pm(pm_op))) { + /* ensure that bkops is disabled */ + ufshcd_disable_auto_bkops(hba); + ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); + if (ret) + goto enable_gating; + } + + ret = ufshcd_link_state_transition(hba, req_link_state, 1); + if (ret) + goto set_dev_active; + + ufshcd_vreg_set_lpm(hba); + +disable_clks: + /* + * The clock scaling needs access to controller registers. Hence, Wait + * for pending clock scaling work to be done before clocks are + * turned off. + */ + if (ufshcd_is_clkscaling_enabled(hba)) { + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } + /* + * Call vendor specific suspend callback. As these callbacks may access + * vendor specific host controller register space call them before the + * host clocks are ON. + */ + if (hba->vops && hba->vops->suspend) { + ret = hba->vops->suspend(hba, pm_op); + if (ret) + goto set_link_active; + } + + if (hba->vops && hba->vops->setup_clocks) { + ret = hba->vops->setup_clocks(hba, false); + if (ret) + goto vops_resume; + } + + if (!ufshcd_is_link_active(hba)) + ufshcd_setup_clocks(hba, false); + else + /* If link is active, device ref_clk can't be switched off */ + __ufshcd_setup_clocks(hba, false, true); + + hba->clk_gating.state = CLKS_OFF; + /* + * Disable the host irq as host controller as there won't be any + * host controller trasanction expected till resume. + */ + ufshcd_disable_irq(hba); + /* Put the host controller in low power mode if possible */ + ufshcd_hba_vreg_set_lpm(hba); + goto out; + +vops_resume: + if (hba->vops && hba->vops->resume) + hba->vops->resume(hba, pm_op); +set_link_active: + ufshcd_vreg_set_hpm(hba); + if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) + ufshcd_set_link_active(hba); + else if (ufshcd_is_link_off(hba)) + ufshcd_host_reset_and_restore(hba); +set_dev_active: + if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) + ufshcd_disable_auto_bkops(hba); +enable_gating: + hba->clk_gating.is_suspended = false; + ufshcd_release(hba); +out: + hba->pm_op_in_progress = 0; + return ret; } -EXPORT_SYMBOL_GPL(ufshcd_suspend); /** - * ufshcd_resume - resume power management function + * ufshcd_resume - helper function for resume operations * @hba: per adapter instance + * @pm_op: runtime PM or system PM + * + * This function basically brings the UFS device, UniPro link and controller + * to active state. 
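+ * The bring-up sequence is roughly the reverse of ufshcd_suspend():
+ * clocks and IRQ first, then the regulators and the vendor resume
+ * callback, then the link, and finally the device power mode.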
* - * Returns -ENOSYS + * Returns 0 for success and non-zero for failure */ -int ufshcd_resume(struct ufs_hba *hba) +static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int ret; + enum uic_link_state old_link_state; + + hba->pm_op_in_progress = 1; + old_link_state = hba->uic_link_state; + + ufshcd_hba_vreg_set_hpm(hba); + /* Make sure clocks are enabled before accessing controller */ + ret = ufshcd_setup_clocks(hba, true); + if (ret) + goto out; + + /* enable the host irq as host controller would be active soon */ + ret = ufshcd_enable_irq(hba); + if (ret) + goto disable_irq_and_vops_clks; + + ret = ufshcd_vreg_set_hpm(hba); + if (ret) + goto disable_irq_and_vops_clks; + + /* + * Call vendor specific resume callback. As these callbacks may access + * vendor specific host controller register space call them when the + * host clocks are ON. + */ + if (hba->vops && hba->vops->resume) { + ret = hba->vops->resume(hba, pm_op); + if (ret) + goto disable_vreg; + } + + if (ufshcd_is_link_hibern8(hba)) { + ret = ufshcd_uic_hibern8_exit(hba); + if (!ret) + ufshcd_set_link_active(hba); + else + goto vendor_suspend; + } else if (ufshcd_is_link_off(hba)) { + ret = ufshcd_host_reset_and_restore(hba); + /* + * ufshcd_host_reset_and_restore() should have already + * set the link state as active + */ + if (ret || !ufshcd_is_link_active(hba)) + goto vendor_suspend; + } + + if (!ufshcd_is_ufs_dev_active(hba)) { + ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); + if (ret) + goto set_old_link_state; + } + /* - * TODO: - * 1. Set HCE to 1, to start the UFS host controller - * initialization process - * 2. Set UTRLRSR and UTMRLRSR bits to 1 - * 3. Change the internal driver state to operational - * 4. Unblock SCSI requests from SCSI midlayer + * If BKOPs operations are urgently needed at this moment then + * keep auto-bkops enabled or else disable it. */ + ufshcd_urgent_bkops(hba); + hba->clk_gating.is_suspended = false; + + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_resume_device(hba->devfreq); + + /* Schedule clock gating in case of no access to UFS device yet */ + ufshcd_release(hba); + goto out; + +set_old_link_state: + ufshcd_link_state_transition(hba, old_link_state, 0); +vendor_suspend: + if (hba->vops && hba->vops->suspend) + hba->vops->suspend(hba, pm_op); +disable_vreg: + ufshcd_vreg_set_lpm(hba); +disable_irq_and_vops_clks: + ufshcd_disable_irq(hba); + ufshcd_setup_clocks(hba, false); +out: + hba->pm_op_in_progress = 0; + return ret; +} + +/** + * ufshcd_system_suspend - system suspend routine + * @hba: per adapter instance + * @pm_op: runtime PM or system PM + * + * Check the description of ufshcd_suspend() function for more details. + * + * Returns 0 for success and non-zero for failure + */ +int ufshcd_system_suspend(struct ufs_hba *hba) +{ + int ret = 0; + + if (!hba || !hba->is_powered) + goto out; + + if (pm_runtime_suspended(hba->dev)) { + if (hba->rpm_lvl == hba->spm_lvl) + /* + * There is possibility that device may still be in + * active state during the runtime suspend. + */ + if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == + hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) + goto out; + + /* + * UFS device and/or UFS link low power states during runtime + * suspend seems to be different than what is expected during + * system suspend. Hence runtime resume the devic & link and + * let the system suspend low power states to take effect. 
+ * TODO: If resume takes longer time, we might optimize
+ * it in the future by not resuming everything if possible.
+ */
+		ret = ufshcd_runtime_resume(hba);
+		if (ret)
+			goto out;
+	}
+
+	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+	if (!ret)
+		hba->is_sys_suspended = true;
+	return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+		/*
+		 * Let the runtime resume take care of resuming
+		 * if runtime suspended.
+		 */
+		return 0;
 
-	return -ENOSYS;
+	return ufshcd_resume(hba, UFS_SYSTEM_PM);
 }
-EXPORT_SYMBOL_GPL(ufshcd_resume);
+EXPORT_SYMBOL(ufshcd_system_resume);
 
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
-	if (!hba)
+	if (!hba || !hba->is_powered)
 		return 0;
 
-	/*
-	 * The device is idle with no requests in the queue,
-	 * allow background operations.
-	 */
-	return ufshcd_enable_auto_bkops(hba);
+	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. Following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
+ *    device to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So following would be the possible power state after this function returns
+ * successfully:
+ *	S1: UFS device in Active state with VCC rail ON
+ *	    UniPro link in Active state
+ *	    All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-	if (!hba)
+	if (!hba || !hba->is_powered)
 		return 0;
-
-	return ufshcd_disable_auto_bkops(hba);
+	else
+		return ufshcd_resume(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -3245,6 +5194,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
 /**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function would power off both UFS device and UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */ +int ufshcd_shutdown(struct ufs_hba *hba) +{ + int ret = 0; + + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) + goto out; + + if (pm_runtime_suspended(hba->dev)) { + ret = ufshcd_runtime_resume(hba); + if (ret) + goto out; + } + + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); +out: + if (ret) + dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); + /* allow force shutdown even in case of errors */ + return 0; +} +EXPORT_SYMBOL(ufshcd_shutdown); + +/** * ufshcd_remove - de-allocate SCSI host and host memory space * data structure memory * @hba - per adapter instance @@ -3252,11 +5231,17 @@ EXPORT_SYMBOL(ufshcd_runtime_idle); void ufshcd_remove(struct ufs_hba *hba) { scsi_remove_host(hba->host); + ufshcd_scsi_remove_wlus(hba); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); scsi_host_put(hba->host); + + ufshcd_exit_clk_gating(hba); + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_remove_device(hba->devfreq); + ufshcd_hba_exit(hba); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -3277,19 +5262,16 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba) } /** - * ufshcd_init - Driver initialization routine + * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) * @dev: pointer to device handle * @hba_handle: driver private handle - * @mmio_base: base register address - * @irq: Interrupt line of device * Returns 0 on success, non-zero value on failure */ -int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, - void __iomem *mmio_base, unsigned int irq) +int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) { struct Scsi_Host *host; struct ufs_hba *hba; - int err; + int err = 0; if (!dev) { dev_err(dev, @@ -3298,13 +5280,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, goto out_error; } - if (!mmio_base) { - dev_err(dev, - "Invalid memory reference for mmio_base is NULL\n"); - err = -ENODEV; - goto out_error; - } - host = scsi_host_alloc(&ufshcd_driver_template, sizeof(struct ufs_hba)); if (!host) { @@ -3315,9 +5290,146 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, hba = shost_priv(host); hba->host = host; hba->dev = dev; + *hba_handle = hba; + +out_error: + return err; +} +EXPORT_SYMBOL(ufshcd_alloc_host); + +static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) +{ + int ret = 0; + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + + if (!head || list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { + if (scale_up && clki->max_freq) { + if (clki->curr_freq == clki->max_freq) + continue; + ret = clk_set_rate(clki->clk, clki->max_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->max_freq, ret); + break; + } + clki->curr_freq = clki->max_freq; + + } else if (!scale_up && clki->min_freq) { + if (clki->curr_freq == clki->min_freq) + continue; + ret = clk_set_rate(clki->clk, clki->min_freq); + if (ret) { + dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", + __func__, clki->name, + clki->min_freq, ret); + break; + } + clki->curr_freq = clki->min_freq; + } + } + dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, + clki->name, clk_get_rate(clki->clk)); + } + if (hba->vops->clk_scale_notify) + hba->vops->clk_scale_notify(hba); +out: + return ret; +} + +static int ufshcd_devfreq_target(struct device *dev, + unsigned long *freq, u32 flags) +{ + int err = 0; + struct ufs_hba *hba = dev_get_drvdata(dev); + + if 
(!ufshcd_is_clkscaling_enabled(hba)) + return -EINVAL; + + if (*freq == UINT_MAX) + err = ufshcd_scale_clks(hba, true); + else if (*freq == 0) + err = ufshcd_scale_clks(hba, false); + + return err; +} + +static int ufshcd_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; + + if (!ufshcd_is_clkscaling_enabled(hba)) + return -EINVAL; + + memset(stat, 0, sizeof(*stat)); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!scaling->window_start_t) + goto start_window; + + if (scaling->is_busy_started) + scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), + scaling->busy_start_t)); + + stat->total_time = jiffies_to_usecs((long)jiffies - + (long)scaling->window_start_t); + stat->busy_time = scaling->tot_busy_t; +start_window: + scaling->window_start_t = jiffies; + scaling->tot_busy_t = 0; + + if (hba->outstanding_reqs) { + scaling->busy_start_t = ktime_get(); + scaling->is_busy_started = true; + } else { + scaling->busy_start_t = ktime_set(0, 0); + scaling->is_busy_started = false; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; +} + +static struct devfreq_dev_profile ufs_devfreq_profile = { + .polling_ms = 100, + .target = ufshcd_devfreq_target, + .get_dev_status = ufshcd_devfreq_get_dev_status, +}; + +/** + * ufshcd_init - Driver initialization routine + * @hba: per-adapter instance + * @mmio_base: base register address + * @irq: Interrupt line of device + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) +{ + int err; + struct Scsi_Host *host = hba->host; + struct device *dev = hba->dev; + + if (!mmio_base) { + dev_err(hba->dev, + "Invalid memory reference for mmio_base is NULL\n"); + err = -ENODEV; + goto out_error; + } + hba->mmio_base = mmio_base; hba->irq = irq; + err = ufshcd_hba_init(hba); + if (err) + goto out_error; + /* Read capabilities registers */ ufshcd_hba_capabilities(hba); @@ -3346,11 +5458,13 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, host->can_queue = hba->nutrs; host->cmd_per_lun = hba->nutrs; host->max_id = UFSHCD_MAX_ID; - host->max_lun = UFSHCD_MAX_LUNS; + host->max_lun = UFS_MAX_LUNS; host->max_channel = UFSHCD_MAX_CHANNEL; host->unique_id = host->host_no; host->max_cmd_len = MAX_CDB_SIZE; + hba->max_pwr_info.is_valid = false; + /* Initailize wait queue for task management */ init_waitqueue_head(&hba->tm_wq); init_waitqueue_head(&hba->tm_tag_wq); @@ -3368,24 +5482,27 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, /* Initialize device management tag acquire wait queue */ init_waitqueue_head(&hba->dev_cmd.tag_wq); + ufshcd_init_clk_gating(hba); /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); if (err) { dev_err(hba->dev, "request irq failed\n"); - goto out_disable; + goto exit_gating; + } else { + hba->is_irq_enabled = true; } /* Enable SCSI tag mapping */ err = scsi_init_shared_tag_map(host, host->can_queue); if (err) { dev_err(hba->dev, "init shared queue failed\n"); - goto out_disable; + goto exit_gating; } err = scsi_add_host(host, hba->dev); if (err) { dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; + goto exit_gating; } /* Host controller enable */ @@ -3395,19 +5512,40 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, goto out_remove_scsi_host; } - *hba_handle = hba; + 
if (ufshcd_is_clkscaling_enabled(hba)) { + hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, + "simple_ondemand", NULL); + if (IS_ERR(hba->devfreq)) { + dev_err(hba->dev, "Unable to register with devfreq %ld\n", + PTR_ERR(hba->devfreq)); + goto out_remove_scsi_host; + } + /* Suspend devfreq until the UFS device is detected */ + devfreq_suspend_device(hba->devfreq); + hba->clk_scaling.window_start_t = 0; + } /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); + /* + * The device-initialize-sequence hasn't been invoked yet. + * Set the device to power-off state + */ + ufshcd_set_ufs_dev_poweroff(hba); + async_schedule(ufshcd_async_scan, hba); return 0; out_remove_scsi_host: scsi_remove_host(hba->host); +exit_gating: + ufshcd_exit_clk_gating(hba); out_disable: + hba->is_irq_enabled = false; scsi_host_put(host); + ufshcd_hba_exit(hba); out_error: return err; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index acf318e338ed..58ecdff5065c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -52,6 +52,7 @@ #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/completion.h> +#include <linux/regulator/consumer.h> #include <asm/irq.h> #include <asm/byteorder.h> @@ -68,6 +69,8 @@ #define UFSHCD "ufshcd" #define UFSHCD_DRIVER_VERSION "0.2" +struct ufs_hba; + enum dev_cmd_type { DEV_CMD_TYPE_NOP = 0x0, DEV_CMD_TYPE_QUERY = 0x1, @@ -93,6 +96,54 @@ struct uic_command { struct completion done; }; +/* Used to differentiate the power management options */ +enum ufs_pm_op { + UFS_RUNTIME_PM, + UFS_SYSTEM_PM, + UFS_SHUTDOWN_PM, +}; + +#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM) +#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM) +#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM) + +/* Host <-> Device UniPro Link state */ +enum uic_link_state { + UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */ + UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */ + UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */ +}; + +#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE) +#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \ + UIC_LINK_ACTIVE_STATE) +#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \ + UIC_LINK_HIBERN8_STATE) +#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE) +#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \ + UIC_LINK_ACTIVE_STATE) +#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \ + UIC_LINK_HIBERN8_STATE) + +/* + * UFS Power management levels. + * Each level is in increasing order of power savings. 
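+ * A level selects a (device power mode, UIC link state) pair; the exact
+ * mapping is noted next to each enumerator below.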
+ */ +enum ufs_pm_level { + UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */ + UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */ + UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */ + UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */ + UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */ + UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */ + UFS_PM_LVL_MAX +}; + +struct ufs_pm_lvl_states { + enum ufs_dev_pwr_mode dev_state; + enum uic_link_state link_state; +}; + /** * struct ufshcd_lrb - local reference block * @utr_descriptor_ptr: UTRD address of the command @@ -121,7 +172,7 @@ struct ufshcd_lrb { int command_type; int task_tag; - unsigned int lun; + u8 lun; /* UPIU LUN id field is only 8-bit wide */ bool intr_cmd; }; @@ -153,6 +204,126 @@ struct ufs_dev_cmd { }; /** + * struct ufs_clk_info - UFS clock related info + * @list: list headed by hba->clk_list_head + * @clk: clock node + * @name: clock name + * @max_freq: maximum frequency supported by the clock + * @min_freq: min frequency that can be used for clock scaling + * @curr_freq: indicates the current frequency that it is set to + * @enabled: variable to check against multiple enable/disable + */ +struct ufs_clk_info { + struct list_head list; + struct clk *clk; + const char *name; + u32 max_freq; + u32 min_freq; + u32 curr_freq; + bool enabled; +}; + +#define PRE_CHANGE 0 +#define POST_CHANGE 1 + +struct ufs_pa_layer_attr { + u32 gear_rx; + u32 gear_tx; + u32 lane_rx; + u32 lane_tx; + u32 pwr_rx; + u32 pwr_tx; + u32 hs_rate; +}; + +struct ufs_pwr_mode_info { + bool is_valid; + struct ufs_pa_layer_attr info; +}; + +/** + * struct ufs_hba_variant_ops - variant specific callbacks + * @name: variant name + * @init: called when the driver is initialized + * @exit: called to cleanup everything done in init + * @clk_scale_notify: notifies that clks are scaled up/down + * @setup_clocks: called before touching any of the controller registers + * @setup_regulators: called before accessing the host controller + * @hce_enable_notify: called before and after HCE enable bit is set to allow + * variant specific Uni-Pro initialization. + * @link_startup_notify: called before and after Link startup is carried out + * to allow variant specific Uni-Pro initialization. + * @pwr_change_notify: called before and after a power mode change + * is carried out to allow vendor spesific capabilities + * to be set. 
+ * @suspend: called during host controller PM callback + * @resume: called during host controller PM callback + */ +struct ufs_hba_variant_ops { + const char *name; + int (*init)(struct ufs_hba *); + void (*exit)(struct ufs_hba *); + void (*clk_scale_notify)(struct ufs_hba *); + int (*setup_clocks)(struct ufs_hba *, bool); + int (*setup_regulators)(struct ufs_hba *, bool); + int (*hce_enable_notify)(struct ufs_hba *, bool); + int (*link_startup_notify)(struct ufs_hba *, bool); + int (*pwr_change_notify)(struct ufs_hba *, + bool, struct ufs_pa_layer_attr *, + struct ufs_pa_layer_attr *); + int (*suspend)(struct ufs_hba *, enum ufs_pm_op); + int (*resume)(struct ufs_hba *, enum ufs_pm_op); +}; + +/* clock gating state */ +enum clk_gating_state { + CLKS_OFF, + CLKS_ON, + REQ_CLKS_OFF, + REQ_CLKS_ON, +}; + +/** + * struct ufs_clk_gating - UFS clock gating related info + * @gate_work: worker to turn off clocks after some delay as specified in + * delay_ms + * @ungate_work: worker to turn on clocks that will be used in case of + * interrupt context + * @state: the current clocks state + * @delay_ms: gating delay in ms + * @is_suspended: clk gating is suspended when set to 1 which can be used + * during suspend/resume + * @delay_attr: sysfs attribute to control delay_attr + * @active_reqs: number of requests that are pending and should be waited for + * completion before gating clocks. + */ +struct ufs_clk_gating { + struct delayed_work gate_work; + struct work_struct ungate_work; + enum clk_gating_state state; + unsigned long delay_ms; + bool is_suspended; + struct device_attribute delay_attr; + int active_reqs; +}; + +struct ufs_clk_scaling { + ktime_t busy_start_t; + bool is_busy_started; + unsigned long tot_busy_t; + unsigned long window_start_t; +}; + +/** + * struct ufs_init_prefetch - contains data that is pre-fetched once during + * initialization + * @icc_level: icc level which was read during initialization + */ +struct ufs_init_prefetch { + u32 icc_level; +}; + +/** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address * @ucdl_base_addr: UFS Command Descriptor base address @@ -171,6 +342,8 @@ struct ufs_dev_cmd { * @nutrs: Transfer Request Queue depth supported by controller * @nutmrs: Task Management Queue depth supported by controller * @ufs_version: UFS Version to which controller complies + * @vops: pointer to variant specific operations + * @priv: pointer to variant specific private data * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command @@ -183,6 +356,9 @@ struct ufs_dev_cmd { * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask + * @is_powered: flag to check if HBA is powered + * @is_init_prefetch: flag to check if data was pre-fetched in initialization + * @init_prefetch_data: data pre-fetched during initialization * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events * @errors: HBA errors @@ -191,6 +367,10 @@ struct ufs_dev_cmd { * @saved_uic_err: sticky UIC error mask * @dev_cmd: ufs device management command information * @auto_bkops_enabled: to track whether bkops is enabled in device + * @vreg_info: UFS device voltage regulator information + * @clk_list_head: UFS host controller clocks list node head + * @pwr_info: holds current power mode + * @max_pwr_info: keeps the device max valid pwm */ struct ufs_hba { void __iomem *mmio_base; @@ -207,6 
+387,21 @@ struct ufs_hba {
 	struct Scsi_Host *host;
 	struct device *dev;
 
+	/*
+	 * This field is to keep a reference to "scsi_device" corresponding to
+	 * "UFS device" W-LU.
+	 */
+	struct scsi_device *sdev_ufs_device;
+	struct scsi_device *sdev_rpmb;
+	struct scsi_device *sdev_boot;
+
+	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+	enum uic_link_state uic_link_state;
+	/* Desired UFS power management level during runtime PM */
+	enum ufs_pm_level rpm_lvl;
+	/* Desired UFS power management level during system PM */
+	enum ufs_pm_level spm_lvl;
+	int pm_op_in_progress;
 
 	struct ufshcd_lrb *lrb;
 	unsigned long lrb_in_use;
@@ -218,22 +413,28 @@ struct ufs_hba {
 	int nutrs;
 	int nutmrs;
 	u32 ufs_version;
+	struct ufs_hba_variant_ops *vops;
+	void *priv;
 	unsigned int irq;
+	bool is_irq_enabled;
 
-	struct uic_command *active_uic_cmd;
-	struct mutex uic_cmd_mutex;
 	wait_queue_head_t tm_wq;
 	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
 	unsigned long tm_slots_in_use;
 
-	struct completion *pwr_done;
+	struct uic_command *active_uic_cmd;
+	struct mutex uic_cmd_mutex;
+	struct completion *uic_async_done;
 
 	u32 ufshcd_state;
 	u32 eh_flags;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
+	bool is_powered;
+	bool is_init_prefetch;
+	struct ufs_init_prefetch init_prefetch_data;
 
 	/* Work Queues */
 	struct work_struct eh_work;
@@ -248,16 +449,76 @@ struct ufs_hba {
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
+	/* Keeps information of the UFS device connected to this host */
+	struct ufs_dev_info dev_info;
 
 	bool auto_bkops_enabled;
+	struct ufs_vreg_info vreg_info;
+	struct list_head clk_list_head;
+
+	bool wlun_dev_clr_ua;
+
+	struct ufs_pa_layer_attr pwr_info;
+	struct ufs_pwr_mode_info max_pwr_info;
+
+	struct ufs_clk_gating clk_gating;
+	/* Control to enable/disable host capabilities */
+	u32 caps;
+	/* Allow dynamic clk gating */
+#define UFSHCD_CAP_CLK_GATING	(1 << 0)
+	/* Allow hibern8 with clk gating */
+#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
+	/* Allow dynamic clk scaling */
+#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
+	/* Allow auto bkops to be enabled during runtime suspend */
+#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
+
+	struct devfreq *devfreq;
+	struct ufs_clk_scaling clk_scaling;
+	bool is_sys_suspended;
 };
 
+/* Returns true if clocks can be gated.
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline bool ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+
 #define ufshcd_writel(hba, val, reg) \
 	writel((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg) \
 	readl((hba)->mmio_base + (reg))
 
-int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * ,
-		unsigned int);
+/**
+ * ufshcd_rmwl - read, modify, then write back a register
+ * @hba: per adapter instance
+ * @mask: mask to apply on the read value
+ * @val: actual value to write
+ * @reg: register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+	u32 tmp;
+
+	tmp = ufshcd_readl(hba, reg);
+	tmp &= ~mask;
+	tmp |= (val & mask);
+	ufshcd_writel(hba, tmp, reg);
+}
+
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
 void ufshcd_remove(struct ufs_hba *);
 
 /**
@@ -275,11 +536,12 @@ static inline void check_upiu_size(void)
 		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
 }
 
-extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
-extern int ufshcd_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_system_suspend(struct ufs_hba *hba);
+extern int ufshcd_system_resume(struct ufs_hba *hba);
+extern int ufshcd_shutdown(struct ufs_hba *hba);
 extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 			       u8 attr_set, u32 mib_val, u8 peer);
 extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
@@ -331,4 +593,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index e1b844bc9460..d5721199e9cc 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,8 +124,11 @@ enum {
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
 
-#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL |\
-				UIC_POWER_MODE)
+#define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
+				UIC_HIBERNATE_EXIT |\
+				UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
 
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
@@ -210,7 +213,7 @@ enum {
 #define UIC_GET_ATTR_ID(v)	(((v) >> 16) & 0xFFFF)
 
 /* UIC Commands */
-enum {
+enum uic_cmd_dme {
 	UIC_CMD_DME_GET		= 0x01,
 	UIC_CMD_DME_SET		= 0x02,
 	UIC_CMD_DME_PEER_GET	= 0x03,
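ufshcd_hold()/ufshcd_release() bracket any code path that touches the controller while clock gating is enabled, and ufshcd_rmwl() is the natural helper for updating a field within a register. A sketch combining the two; the register offset, mask, and helper name are invented placeholders, while the three interfaces are the ones added above:

#include "ufshcd.h"

#define EXAMPLE_VENDOR_REG	0x300		/* hypothetical register */
#define EXAMPLE_MODE_MASK	(0x3 << 4)	/* hypothetical field mask */

static void example_set_vendor_mode(struct ufs_hba *hba, u32 mode)
{
	ufshcd_hold(hba, false);	/* block gating; ungate if needed */
	ufshcd_rmwl(hba, EXAMPLE_MODE_MASK, mode << 4, EXAMPLE_VENDOR_REG);
	ufshcd_release(hba);		/* drop reference; gating may resume */
}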
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 0bb8041c047a..3fc3e21b746b 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -13,6 +13,44 @@
 #define _UNIPRO_H_
 
 /*
+ * M-TX Configuration Attributes
+ */
+#define TX_MODE					0x0021
+#define TX_HSRATE_SERIES			0x0022
+#define TX_HSGEAR				0x0023
+#define TX_PWMGEAR				0x0024
+#define TX_AMPLITUDE				0x0025
+#define TX_HS_SLEWRATE				0x0026
+#define TX_SYNC_SOURCE				0x0027
+#define TX_HS_SYNC_LENGTH			0x0028
+#define TX_HS_PREPARE_LENGTH			0x0029
+#define TX_LS_PREPARE_LENGTH			0x002A
+#define TX_HIBERN8_CONTROL			0x002B
+#define TX_LCC_ENABLE				0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION		0x002D
+#define TX_BYPASS_8B10B_ENABLE			0x002E
+#define TX_DRIVER_POLARITY			0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE	0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE	0x0031
+#define TX_LCC_SEQUENCER			0x0032
+#define TX_MIN_ACTIVATETIME			0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH		0x0034
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_MODE					0x00A1
+#define RX_HSRATE_SERIES			0x00A2
+#define RX_HSGEAR				0x00A3
+#define RX_PWMGEAR				0x00A4
+#define RX_LS_TERMINATED_ENABLE			0x00A5
+#define RX_HS_UNTERMINATED_ENABLE		0x00A6
+#define RX_ENTER_HIBERN8			0x00A7
+#define RX_BYPASS_8B10B_ENABLE			0x00A8
+#define RX_TERMINATION_FORCE_ENABLE		0x0089
+
+#define is_mphy_tx_attr(attr)			(attr < RX_MODE)
+/*
  * PHY Adapter attributes
  */
 #define PA_ACTIVETXDATALANES	0x1560
@@ -87,6 +125,24 @@ enum {
 	PA_HS_MODE_B	= 2,
 };
 
+enum ufs_pwm_gear_tag {
+	UFS_PWM_DONT_CHANGE,	/* Don't change Gear */
+	UFS_PWM_G1,		/* PWM Gear 1 (default for reset) */
+	UFS_PWM_G2,		/* PWM Gear 2 */
+	UFS_PWM_G3,		/* PWM Gear 3 */
+	UFS_PWM_G4,		/* PWM Gear 4 */
+	UFS_PWM_G5,		/* PWM Gear 5 */
+	UFS_PWM_G6,		/* PWM Gear 6 */
+	UFS_PWM_G7,		/* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+	UFS_HS_DONT_CHANGE,	/* Don't change Gear */
+	UFS_HS_G1,		/* HS Gear 1 (default for reset) */
+	UFS_HS_G2,		/* HS Gear 2 */
+	UFS_HS_G3,		/* HS Gear 3 */
+};
+
 /*
  * Data Link Layer Attributes
  */
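The gear tags above are the values written into the PHY Adapter gear attributes when a new power mode is programmed. A simplified, hypothetical sketch of that idea using the DME helpers from ufshcd.h (the function name is made up, error handling is abbreviated, and the real driver performs additional lane and termination configuration before switching):

#include "ufshcd.h"
#include "unipro.h"

/* Sketch: request HS Gear 2, rate series B, fast mode in both directions. */
static int example_program_hs_g2(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G2);
	if (!ret)
		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G2);
	if (!ret)
		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
				     PA_HS_MODE_B);
	if (!ret)	/* PA_PWRMODE packs RX mode in bits 7:4, TX in 3:0 */
		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODE),
				     (FAST_MODE << 4) | FAST_MODE);
	return ret;
}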