Diffstat (limited to 'drivers'): 169 files changed, 6638 insertions, 1601 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d0aad06b3872..f245bf35bedb 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -145,6 +145,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
 	{ "AMD0010", APD_ADDR(cz_i2c_desc) },
 	{ "AMDI0010", APD_ADDR(cz_i2c_desc) },
 	{ "AMD0020", APD_ADDR(cz_uart_desc) },
+	{ "AMDI0020", APD_ADDR(cz_uart_desc) },
 	{ "AMD0030", },
 #endif
 #ifdef CONFIG_ARM64
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b5e54f2da53d..0d92d0f915e9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
 }
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+							  u32 lvl,
+							  void *context,
+							  void **rv)
+{
+	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+	u32 capbuf[2];
+	struct acpi_osc_context osc_context = {
+		.uuid_str = sb_uuid_str,
+		.rev = 1,
+		.cap.length = 8,
+		.cap.pointer = capbuf,
+	};
+
+	if (acpi_hwp_native_thermal_lvt_set)
+		return AE_CTRL_TERMINATE;
+
+	capbuf[0] = 0x0000;
+	capbuf[1] = 0x1000; /* set bit 12 */
+
+	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+			u32 *capbuf_ret = osc_context.ret.pointer;
+
+			if (capbuf_ret[1] & 0x1000) {
+				acpi_handle_info(handle,
+					"_OSC native thermal LVT Acked\n");
+				acpi_hwp_native_thermal_lvt_set = true;
+			}
+		}
+		kfree(osc_context.ret.pointer);
+	}
+
+	return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+	if (boot_cpu_has(X86_FEATURE_HWP)) {
+		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+				    ACPI_UINT32_MAX,
+				    acpi_hwp_native_thermal_lvt_osc,
+				    NULL, NULL, NULL);
+		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+				 acpi_hwp_native_thermal_lvt_osc,
+				 NULL, NULL);
+	}
+}
+#endif
+
 /*
  * The following ACPI IDs are known to be suitable for representing as
  * processor devices.
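The new acpi_hwp_native_thermal_lvt_osc() above follows the standard _OSC calling convention: the capability buffer is an array of u32 DWORDs, where DWORD 0 carries the query/error flags and DWORD 1 the interface-specific capability bits, and cap.length is given in bytes. A minimal sketch of that convention (UUID and bit position taken from the hunk; BIT(12) is just a clearer spelling of 0x1000):

	u32 capbuf[2];
	struct acpi_osc_context ctx = {
		.uuid_str = "4077A616-290C-47BE-9EBD-D87058713953",
		.rev = 1,
		.cap.length = sizeof(capbuf),	/* in bytes: two DWORDs */
		.cap.pointer = capbuf,
	};

	capbuf[0] = 0;		/* DWORD 0: no query, no error flags */
	capbuf[1] = BIT(12);	/* DWORD 1: OS handles the HWP thermal LVT */

	if (ACPI_SUCCESS(acpi_run_osc(handle, &ctx)))
		kfree(ctx.ret.pointer);	/* caller owns the returned buffer */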
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0e8567846f1a..c068c829b453 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1019,6 +1019,9 @@ static int __init acpi_bus_init(void)
 		goto error1;
 	}
 
+	/* Set capability bits for _OSC under processor scope */
+	acpi_early_processor_osc();
+
 	/*
 	 * _OSC method may exist in module level code,
 	 * so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a37508ef66c1..7c188472d9c2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -145,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
 static inline void acpi_early_processor_set_pdc(void) {}
 #endif
 
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
 /* --------------------------------------------------------------------------
                                   Embedded Controller
    -------------------------------------------------------------------------- */
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2aee41655ce9..f2fd3fee588a 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -816,6 +816,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
 		next = adev->node.next;
 		if (next == head) {
 			child = NULL;
+			adev = ACPI_COMPANION(dev);
 			goto nondev;
 		}
 		adev = list_entry(next, struct acpi_device, node);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d02fd53042a5..56241eb341f4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -27,8 +27,20 @@
 
 #ifdef CONFIG_X86
 #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+	/* On X86 IO space is limited to the [0 - 64K] IO port range */
+	return res->end < 0x10003;
+}
 #else
 #define valid_IRQ(i) (true)
+/*
+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
+ * addresses mapping IO space in CPU physical address space, IO space
+ * resources can be placed anywhere in the 64-bit physical address space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
 #endif
 
 static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
 	if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
-	if (res->end >= 0x10003)
+	if (!acpi_iospace_resource_valid(res))
 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
 	if (io_decode == ACPI_DECODE_16)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fbfcce3b5227..2a8b59644297 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -748,6 +748,7 @@ static int acpi_hibernation_enter(void)
 
 static void acpi_hibernation_leave(void)
 {
+	pm_set_resume_via_firmware();
 	/*
 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
 	 * enable it here.
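The one-line sleep.c change above is the producer side of an existing API pair in <linux/suspend.h>: pm_set_resume_via_firmware() records that platform firmware took part in the wakeup, and drivers may query that with pm_resume_via_firmware(). A hypothetical driver-side consumer (the foo_* names are placeholders, not from this patch set):

	#include <linux/suspend.h>

	static int foo_resume(struct device *dev)
	{
		/* Firmware-assisted resume: the BIOS restored the device,
		 * so a light-weight re-init path is enough. */
		if (pm_resume_via_firmware())
			return 0;

		return foo_full_reinit(dev);	/* hypothetical helper */
	}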
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f12a72428aac..050673f0c0b3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -692,7 +692,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
 		mask = obj->integer.value;
 	else if (obj->type == ACPI_TYPE_BUFFER)
 		for (i = 0; i < obj->buffer.length && i < 8; i++)
-			mask |= (((u8)obj->buffer.pointer[i]) << (i * 8));
+			mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
 	ACPI_FREE(obj);
 
 	/*
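The utils.c fix above is a classic integer-promotion bug: a u8 operand of << is promoted to 32-bit int, so for i >= 4 the shift discards the upper bytes of the _DSM function mask (and shifting an int by i * 8 >= 32 is undefined behaviour). Casting to u64 before shifting keeps all eight bytes. A standalone userspace demonstration of the same pattern:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t buf[8] = { 0, 0, 0, 0, 0x01, 0, 0, 0 };
		uint64_t narrow = 0, wide = 0;

		for (int i = 0; i < 8; i++) {
			/* promoted to int: lost/undefined for i >= 4 */
			narrow |= ((uint8_t)buf[i]) << (i * 8);
			/* widened first: all 64 bits usable */
			wide |= ((uint64_t)buf[i]) << (i * 8);
		}
		printf("narrow=%#llx wide=%#llx\n",
		       (unsigned long long)narrow, (unsigned long long)wide);
		return 0;
	}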
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 272a52ebafc0..0e64a1b5e62a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -137,6 +137,62 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
 	return __pm_clk_add(dev, NULL, clk);
 }
 
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clock(s) is going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+	struct clk **clks;
+	unsigned int i, count;
+	int ret;
+
+	if (!dev || !dev->of_node)
+		return -EINVAL;
+
+	count = of_count_phandle_with_args(dev->of_node, "clocks",
+					   "#clock-cells");
+	if (count == 0)
+		return -ENODEV;
+
+	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		clks[i] = of_clk_get(dev->of_node, i);
+		if (IS_ERR(clks[i])) {
+			ret = PTR_ERR(clks[i]);
+			goto error;
+		}
+
+		ret = pm_clk_add_clk(dev, clks[i]);
+		if (ret) {
+			clk_put(clks[i]);
+			goto error;
+		}
+	}
+
+	kfree(clks);
+
+	return i;
+
+error:
+	while (i--)
+		pm_clk_remove_clk(dev, clks[i]);
+
+	kfree(clks);
+
+	return ret;
+}
+
 /**
  * __pm_clk_remove - Destroy PM clock entry.
  * @ce: PM clock entry to destroy.
@@ -198,6 +254,39 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 }
 
 /**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	struct pm_clock_entry *ce;
+
+	if (!psd || !clk)
+		return;
+
+	spin_lock_irq(&psd->lock);
+
+	list_for_each_entry(ce, &psd->clock_list, node) {
+		if (clk == ce->clk)
+			goto remove;
+	}
+
+	spin_unlock_irq(&psd->lock);
+	return;
+
+ remove:
+	list_del(&ce->node);
+	spin_unlock_irq(&psd->lock);
+
+	__pm_clk_remove(ce);
+}
+
+/**
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
 *
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index cc2e71d0a77f..25824c1697c5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2051,7 +2051,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
 						 outbuf,
 						 taskout,
 						 DMA_TO_DEVICE);
-		if (outbuf_dma == 0) {
+		if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
 			err = -ENOMEM;
 			goto abort;
 		}
@@ -2068,7 +2068,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
 		inbuf_dma = pci_map_single(dd->pdev,
 					   inbuf,
 					   taskin, DMA_FROM_DEVICE);
-		if (inbuf_dma == 0) {
+		if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
 			err = -ENOMEM;
 			goto abort;
 		}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 64a7b5971b57..cab97593ba54 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -742,10 +742,11 @@ static int null_add_dev(void)
 
 	add_disk(disk);
 
+done:
 	mutex_lock(&lock);
 	list_add_tail(&nullb->list, &nullb_list);
 	mutex_unlock(&lock);
-done:
+
 	return 0;
 
 out_cleanup_lightnvm:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4a876785b68c..9c6234428607 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1847,14 +1847,12 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 	if (osd_req->r_result < 0)
 		obj_request->result = osd_req->r_result;
 
-	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
-
 	/*
 	 * We support a 64-bit length, but ultimately it has to be
 	 * passed to the block layer, which just supports a 32-bit
 	 * length field.
 	 */
-	obj_request->xferred = osd_req->r_reply_op_len[0];
+	obj_request->xferred = osd_req->r_ops[0].outdata_len;
 	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
 
 	opcode = osd_req->r_ops[0].op;
@@ -5643,18 +5641,12 @@ static void rbd_sysfs_cleanup(void)
 static int rbd_slab_init(void)
 {
 	rbd_assert(!rbd_img_request_cache);
-	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
-					sizeof (struct rbd_img_request),
-					__alignof__(struct rbd_img_request),
-					0, NULL);
+	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
 	if (!rbd_img_request_cache)
 		return -ENOMEM;
 
 	rbd_assert(!rbd_obj_request_cache);
-	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
-					sizeof (struct rbd_obj_request),
-					__alignof__(struct rbd_obj_request),
-					0, NULL);
+	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
 	if (!rbd_obj_request_cache)
 		goto out_err;
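The rbd.c slab hunks above rely on the KMEM_CACHE() helper from <linux/slab.h>, which derives the cache name, object size, and alignment from the struct itself instead of repeating them by hand; it expands roughly to:

	#define KMEM_CACHE(__struct, __flags)				\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
				  __alignof__(struct __struct),		\
				  (__flags), NULL)

so KMEM_CACHE(rbd_img_request, 0) is equivalent to the open-coded kmem_cache_create() call it replaces, minus the duplication.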
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d23368874710..f8a483c67b07 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -286,7 +286,7 @@ static int register_device(int minor, struct pp_struct *pp)
 	struct parport *port;
 	struct pardevice *pdev = NULL;
 	char *name;
-	struct pardev_cb ppdev_cb;
+	int fl;
 
 	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
 	if (name == NULL)
@@ -299,11 +299,9 @@ static int register_device(int minor, struct pp_struct *pp)
 		return -ENXIO;
 	}
 
-	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
-	ppdev_cb.irq_func = pp_irq;
-	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
-	ppdev_cb.private = pp;
-	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+	fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+	pdev = parport_register_device(port, name, NULL,
+				       NULL, pp_irq, fl, pp);
 	parport_put_port(port);
 
 	if (!pdev) {
@@ -801,23 +799,10 @@ static void pp_detach(struct parport *port)
 	device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
 }
 
-static int pp_probe(struct pardevice *par_dev)
-{
-	struct device_driver *drv = par_dev->dev.driver;
-	int len = strlen(drv->name);
-
-	if (strncmp(par_dev->name, drv->name, len))
-		return -ENODEV;
-
-	return 0;
-}
-
 static struct parport_driver pp_driver = {
 	.name = CHRDEV,
-	.probe = pp_probe,
-	.match_port = pp_attach,
+	.attach = pp_attach,
 	.detach = pp_detach,
-	.devmodel = true,
 };
 
 static int __init ppdev_init(void)
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b19ac4..309049d41f1b 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
 	return mtk_reset_deassert(rcdev, id);
 }
 
-static struct reset_control_ops mtk_reset_ops = {
+static const struct reset_control_ops mtk_reset_ops = {
 	.assert = mtk_reset_assert,
 	.deassert = mtk_reset_deassert,
 	.reset = mtk_reset,
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1fe73f0..b4e4d6aa2631 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
-static struct reset_control_ops mmp_clk_reset_ops = {
+static const struct reset_control_ops mmp_clk_reset_ops = {
 	.assert = mmp_clk_reset_assert,
 	.deassert = mmp_clk_reset_deassert,
 };
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 5428efb9fbf5..3cd1af0af0d9 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -129,20 +129,10 @@ static const char * const gcc_xo_ddr_500_200[] = {
 };
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define P_XO 0
-#define FE_PLL_200 1
-#define FE_PLL_500 2
-#define DDRC_PLL_666 3
-
-#define DDRC_PLL_666_SDCC 1
-#define FE_PLL_125_DLY 1
-
-#define FE_PLL_WCSS2G 1
-#define FE_PLL_WCSS5G 1
 
 static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(200000000, FE_PLL_200, 1, 0, 0),
+	F(200000000, P_FEPLL200, 1, 0, 0),
 	{ }
 };
 
@@ -334,15 +324,15 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
-	F(1843200, FE_PLL_200, 1, 144, 15625),
-	F(3686400, FE_PLL_200, 1, 288, 15625),
-	F(7372800, FE_PLL_200, 1, 576, 15625),
-	F(14745600, FE_PLL_200, 1, 1152, 15625),
-	F(16000000, FE_PLL_200, 1, 2, 25),
+	F(1843200, P_FEPLL200, 1, 144, 15625),
+	F(3686400, P_FEPLL200, 1, 288, 15625),
+	F(7372800, P_FEPLL200, 1, 576, 15625),
+	F(14745600, P_FEPLL200, 1, 1152, 15625),
+	F(16000000, P_FEPLL200, 1, 2, 25),
 	F(24000000, P_XO, 1, 1, 2),
-	F(32000000, FE_PLL_200, 1, 4, 25),
-	F(40000000, FE_PLL_200, 1, 1, 5),
-	F(46400000, FE_PLL_200, 1, 29, 125),
+	F(32000000, P_FEPLL200, 1, 4, 25),
+	F(40000000, P_FEPLL200, 1, 1, 5),
+	F(46400000, P_FEPLL200, 1, 29, 125),
 	F(48000000, P_XO, 1, 0, 0),
 	{ }
 };
@@ -410,9 +400,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_gp_clk[] = {
-	F(1250000, FE_PLL_200, 1, 16, 0),
-	F(2500000, FE_PLL_200, 1, 8, 0),
-	F(5000000, FE_PLL_200, 1, 4, 0),
+	F(1250000, P_FEPLL200, 1, 16, 0),
+	F(2500000, P_FEPLL200, 1, 8, 0),
+	F(5000000, P_FEPLL200, 1, 4, 0),
 	{ }
 };
 
@@ -512,11 +502,11 @@ static struct clk_branch gcc_gp3_clk = {
 static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
 	F(144000, P_XO, 1, 3, 240),
 	F(400000, P_XO, 1, 1, 0),
-	F(20000000, FE_PLL_500, 1, 1, 25),
-	F(25000000, FE_PLL_500, 1, 1, 20),
-	F(50000000, FE_PLL_500, 1, 1, 10),
-	F(100000000, FE_PLL_500, 1, 1, 5),
-	F(193000000, DDRC_PLL_666_SDCC, 1, 0, 0),
+	F(20000000, P_FEPLL500, 1, 1, 25),
+	F(25000000, P_FEPLL500, 1, 1, 20),
+	F(50000000, P_FEPLL500, 1, 1, 10),
+	F(100000000, P_FEPLL500, 1, 1, 5),
+	F(193000000, P_DDRPLL, 1, 0, 0),
 	{ }
 };
 
@@ -536,9 +526,9 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_apps_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(200000000, FE_PLL_200, 1, 0, 0),
-	F(500000000, FE_PLL_500, 1, 0, 0),
-	F(626000000, DDRC_PLL_666, 1, 0, 0),
+	F(200000000, P_FEPLL200, 1, 0, 0),
+	F(500000000, P_FEPLL500, 1, 0, 0),
+	F(626000000, P_DDRPLLAPSS, 1, 0, 0),
 	{ }
 };
 
@@ -557,7 +547,7 @@ static struct clk_rcg2 apps_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(100000000, FE_PLL_200, 2, 0, 0),
+	F(100000000, P_FEPLL200, 2, 0, 0),
 	{ }
 };
 
@@ -940,7 +930,7 @@ static struct clk_branch gcc_usb2_mock_utmi_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
-	F(2000000, FE_PLL_200, 10, 0, 0),
+	F(2000000, P_FEPLL200, 10, 0, 0),
 	{ }
 };
 
@@ -1007,7 +997,7 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
 };
 
 static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
-	F(125000000, FE_PLL_125_DLY, 1, 0, 0),
+	F(125000000, P_FEPLL125DLY, 1, 0, 0),
 	{ }
 };
 
@@ -1027,7 +1017,7 @@ static struct clk_rcg2 fephy_125m_dly_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(250000000, FE_PLL_WCSS2G, 1, 0, 0),
+	F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
 	{ }
 };
 
@@ -1097,7 +1087,7 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
 
 static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(250000000, FE_PLL_WCSS5G, 1, 0, 0),
+	F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
 	{ }
 };
 
@@ -1325,6 +1315,16 @@ MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
 
 static int gcc_ipq4019_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
+
+	clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
+
 	return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
 }
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3a8590..0324d8daab9b 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
 	return regmap_update_bits(rst->regmap, map->reg, mask, 0);
 }
 
-struct reset_control_ops qcom_reset_ops = {
+const struct reset_control_ops qcom_reset_ops = {
 	.reset = qcom_reset,
 	.assert = qcom_reset_assert,
 	.deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e2130f97..cda877927d43 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
 #define to_qcom_reset_controller(r) \
 	container_of(r, struct qcom_reset_controller, rcdev);
 
-extern struct reset_control_ops qcom_reset_ops;
+extern const struct reset_control_ops qcom_reset_ops;
 
 #endif
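The reset_control_ops constifications in this series are all the same shape: the ops table is filled once at build time and never written, so it can live in .rodata. A minimal hypothetical controller showing the pattern (foo_* names are illustrative), presumably paired with a core change making the reset_controller_dev ops pointer a pointer-to-const:

	#include <linux/reset-controller.h>

	static int foo_reset_assert(struct reset_controller_dev *rcdev,
				    unsigned long id)
	{
		/* set the reset bit for line 'id' here */
		return 0;
	}

	static const struct reset_control_ops foo_reset_ops = {
		.assert = foo_reset_assert,
	};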
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb15bc5..21218987bbc3 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
-static struct reset_control_ops rockchip_softrst_ops = {
+static const struct reset_control_ops rockchip_softrst_ops = {
 	.assert = rockchip_softrst_assert,
 	.deassert = rockchip_softrst_deassert,
 };
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae63e7cc..d0c6c9a2d06a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
-static struct reset_control_ops atlas7_rst_ops = {
+static const struct reset_control_ops atlas7_rst_ops = {
 	.reset = atlas7_reset_module,
 };
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c1717b762..d9ea22ec4e25 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
-static struct reset_control_ops sunxi_ve_reset_ops = {
+static const struct reset_control_ops sunxi_ve_reset_ops = {
 	.assert = sunxi_ve_reset_assert,
 	.deassert = sunxi_ve_reset_deassert,
 };
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b176139aca..028dd832a39f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
-static struct reset_control_ops sun9i_mmc_reset_ops = {
+static const struct reset_control_ops sun9i_mmc_reset_ops = {
 	.assert = sun9i_mmc_reset_assert,
 	.deassert = sun9i_mmc_reset_deassert,
 };
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 5432b1c198a4..fe0c3d169377 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
-static struct reset_control_ops sunxi_usb_reset_ops = {
+static const struct reset_control_ops sunxi_usb_reset_ops = {
 	.assert = sunxi_usb_reset_assert,
 	.deassert = sunxi_usb_reset_deassert,
 };
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe803d6..f60fe2e344ca 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
 	}
 }
 
-static struct reset_control_ops rst_ops = {
+static const struct reset_control_ops rst_ops = {
 	.assert = tegra_clk_rst_assert,
 	.deassert = tegra_clk_rst_deassert,
 };
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 59a7b380fbe2..fb5712141040 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -245,7 +245,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 	}
 }
 
-u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
 {
 	u32 val, dummy;
 
@@ -253,7 +253,7 @@ u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
 	return val;
 }
 
-void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
 {
 	u32 lo, hi;
 
@@ -262,7 +262,7 @@ void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
 	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
 }
 
-u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
 {
 	u32 val, dummy;
 
@@ -270,12 +270,12 @@ u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
 	return val;
 }
 
-void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
 {
 	wrmsr(MSR_AMD_PERF_CTL, val, 0);
 }
 
-u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
 {
 	u32 val;
 
@@ -283,7 +283,7 @@ u32 cpu_freq_read_io(struct acpi_pct_register *reg)
 	return val;
 }
 
-void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
 {
 	acpi_os_write_port(reg->address, val, reg->bit_width);
 }
@@ -514,8 +514,10 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
 	 */
 
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
 		break;
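The boost_notify() change above moves the MSR restore from CPU_UP_PREPARE (when the CPU is not running yet) to CPU_ONLINE, and also covers CPU_DOWN_FAILED, since a CPU that failed to go offline needs its boost MSR state re-applied. The pre-cpuhp-state-machine notifier idiom, sketched with hypothetical foo_* helpers:

	static int foo_cpu_notify(struct notifier_block *nb,
				  unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
		case CPU_DOWN_FAILED:
			foo_restore_percpu_msrs(cpu);	/* hypothetical */
			break;
		}
		return NOTIFY_OK;
	}

(The driver spells out the _FROZEN variants instead of masking them; the effect is the same.)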
pr_debug("cpufreq: Driver did not initialize current freq\n"); + policy->cur = new_freq; + } else if (policy->cur != new_freq && has_target()) { + cpufreq_out_of_sync(policy, new_freq); + } + + return new_freq; +} + static struct subsys_interface cpufreq_interface = { .name = "cpufreq", .subsys = &cpu_subsys, @@ -1583,9 +1604,7 @@ void cpufreq_resume(void) policy); } else { down_write(&policy->rwsem); - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + ret = cpufreq_start_governor(policy); up_write(&policy->rwsem); if (ret) @@ -1593,17 +1612,6 @@ void cpufreq_resume(void) __func__, policy); } } - - /* - * schedule call cpufreq_update_policy() for first-online CPU, as that - * wouldn't be hotplugged-out on suspend. It will verify that the - * current freq is in sync with what we believe it to be. - */ - policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); - if (WARN_ON(!policy)) - return; - - schedule_work(&policy->update); } /** @@ -1927,6 +1935,17 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) return ret; } +static int cpufreq_start_governor(struct cpufreq_policy *policy) +{ + int ret; + + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) + cpufreq_update_current_freq(policy); + + ret = cpufreq_governor(policy, CPUFREQ_GOV_START); + return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); +} + int cpufreq_register_governor(struct cpufreq_governor *governor) { int err; @@ -2063,8 +2082,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, return cpufreq_driver->setpolicy(new_policy); } - if (new_policy->governor == policy->governor) - goto out; + if (new_policy->governor == policy->governor) { + pr_debug("cpufreq: governor limits update\n"); + return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + } pr_debug("governor switch\n"); @@ -2092,10 +2113,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->governor = new_policy->governor; ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); if (!ret) { - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - goto out; - + ret = cpufreq_start_governor(policy); + if (!ret) { + pr_debug("cpufreq: governor change\n"); + return 0; + } cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); } @@ -2106,14 +2128,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) policy->governor = NULL; else - cpufreq_governor(policy, CPUFREQ_GOV_START); + cpufreq_start_governor(policy); } return ret; - - out: - pr_debug("governor: change or update limits\n"); - return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); } /** @@ -2144,19 +2162,11 @@ int cpufreq_update_policy(unsigned int cpu) * -> ask driver for current freq and notify governors about a change */ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { - new_policy.cur = cpufreq_driver->get(cpu); + new_policy.cur = cpufreq_update_current_freq(policy); if (WARN_ON(!new_policy.cur)) { ret = -EIO; goto unlock; } - - if (!policy->cur) { - pr_debug("Driver did not initialize current freq\n"); - policy->cur = new_policy.cur; - } else { - if (policy->cur != new_policy.cur && has_target()) - cpufreq_out_of_sync(policy, new_policy.cur); - } } ret = cpufreq_set_policy(policy, &new_policy); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 1c25ef405616..10a5cfeae8c5 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ 
b/drivers/cpufreq/cpufreq_governor.c @@ -329,7 +329,7 @@ static void dbs_irq_work(struct irq_work *irq_work) struct policy_dbs_info *policy_dbs; policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work); - schedule_work(&policy_dbs->work); + schedule_work_on(smp_processor_id(), &policy_dbs->work); } static void dbs_update_util_handler(struct update_util_data *data, u64 time, diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index cb5607495816..4b644526fd59 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -134,7 +134,7 @@ struct pstate_funcs { int (*get_min)(void); int (*get_turbo)(void); int (*get_scaling)(void); - void (*set)(struct cpudata*, int pstate); + u64 (*get_val)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); int32_t (*get_target_pstate)(struct cpudata *); }; @@ -565,7 +565,7 @@ static int atom_get_turbo_pstate(void) return value & 0x7F; } -static void atom_set_pstate(struct cpudata *cpudata, int pstate) +static u64 atom_get_val(struct cpudata *cpudata, int pstate) { u64 val; int32_t vid_fp; @@ -585,9 +585,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate) if (pstate > cpudata->pstate.max_pstate) vid = cpudata->vid.turbo; - val |= vid; - - wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); + return val | vid; } static int silvermont_get_scaling(void) @@ -711,7 +709,7 @@ static inline int core_get_scaling(void) return 100000; } -static void core_set_pstate(struct cpudata *cpudata, int pstate) +static u64 core_get_val(struct cpudata *cpudata, int pstate) { u64 val; @@ -719,7 +717,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate) if (limits->no_turbo && !limits->turbo_disabled) val |= (u64)1 << 32; - wrmsrl(MSR_IA32_PERF_CTL, val); + return val; } static int knl_get_turbo_pstate(void) @@ -750,7 +748,7 @@ static struct cpu_defaults core_params = { .get_min = core_get_min_pstate, .get_turbo = core_get_turbo_pstate, .get_scaling = core_get_scaling, - .set = core_set_pstate, + .get_val = core_get_val, .get_target_pstate = get_target_pstate_use_performance, }, }; @@ -769,7 +767,7 @@ static struct cpu_defaults silvermont_params = { .get_max_physical = atom_get_max_pstate, .get_min = atom_get_min_pstate, .get_turbo = atom_get_turbo_pstate, - .set = atom_set_pstate, + .get_val = atom_get_val, .get_scaling = silvermont_get_scaling, .get_vid = atom_get_vid, .get_target_pstate = get_target_pstate_use_cpu_load, @@ -790,7 +788,7 @@ static struct cpu_defaults airmont_params = { .get_max_physical = atom_get_max_pstate, .get_min = atom_get_min_pstate, .get_turbo = atom_get_turbo_pstate, - .set = atom_set_pstate, + .get_val = atom_get_val, .get_scaling = airmont_get_scaling, .get_vid = atom_get_vid, .get_target_pstate = get_target_pstate_use_cpu_load, @@ -812,7 +810,7 @@ static struct cpu_defaults knl_params = { .get_min = core_get_min_pstate, .get_turbo = knl_get_turbo_pstate, .get_scaling = core_get_scaling, - .set = core_set_pstate, + .get_val = core_get_val, .get_target_pstate = get_target_pstate_use_performance, }, }; @@ -839,25 +837,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } -static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) +static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate) { - int max_perf, min_perf; - - if (force) { - update_turbo_state(); - - intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 
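The cpufreq_governor.c one-liner matters because irq_work handlers run on the CPU that raised them with interrupts disabled, so smp_processor_id() is stable there, while plain schedule_work() may let any CPU's worker pick the item up. A distilled sketch of the handoff (foo_work is a hypothetical work item):

	static void foo_irq_work(struct irq_work *iw)
	{
		/* keep the follow-up work on this same CPU */
		schedule_work_on(smp_processor_id(), &foo_work);
	}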
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cb5607495816..4b644526fd59 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -134,7 +134,7 @@ struct pstate_funcs {
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
-	void (*set)(struct cpudata*, int pstate);
+	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 	int32_t (*get_target_pstate)(struct cpudata *);
 };
@@ -565,7 +565,7 @@ static int atom_get_turbo_pstate(void)
 	return value & 0x7F;
 }
 
-static void atom_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
@@ -585,9 +585,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 	if (pstate > cpudata->pstate.max_pstate)
 		vid = cpudata->vid.turbo;
 
-	val |= vid;
-
-	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+	return val | vid;
 }
 
 static int silvermont_get_scaling(void)
@@ -711,7 +709,7 @@ static inline int core_get_scaling(void)
 	return 100000;
 }
 
-static void core_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 core_get_val(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 
@@ -719,7 +717,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;
 
-	wrmsrl(MSR_IA32_PERF_CTL, val);
+	return val;
 }
 
 static int knl_get_turbo_pstate(void)
@@ -750,7 +748,7 @@ static struct cpu_defaults core_params = {
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
-		.set = core_set_pstate,
+		.get_val = core_get_val,
 		.get_target_pstate = get_target_pstate_use_performance,
 	},
 };
@@ -769,7 +767,7 @@ static struct cpu_defaults silvermont_params = {
 		.get_max_physical = atom_get_max_pstate,
 		.get_min = atom_get_min_pstate,
 		.get_turbo = atom_get_turbo_pstate,
-		.set = atom_set_pstate,
+		.get_val = atom_get_val,
 		.get_scaling = silvermont_get_scaling,
 		.get_vid = atom_get_vid,
 		.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -790,7 +788,7 @@ static struct cpu_defaults airmont_params = {
 		.get_max_physical = atom_get_max_pstate,
 		.get_min = atom_get_min_pstate,
 		.get_turbo = atom_get_turbo_pstate,
-		.set = atom_set_pstate,
+		.get_val = atom_get_val,
 		.get_scaling = airmont_get_scaling,
 		.get_vid = atom_get_vid,
 		.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -812,7 +810,7 @@ static struct cpu_defaults knl_params = {
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
-		.set = core_set_pstate,
+		.get_val = core_get_val,
 		.get_target_pstate = get_target_pstate_use_performance,
 	},
 };
@@ -839,25 +837,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
+static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
 {
-	int max_perf, min_perf;
-
-	if (force) {
-		update_turbo_state();
-
-		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
-
-		pstate = clamp_t(int, pstate, min_perf, max_perf);
-
-		if (pstate == cpu->pstate.current_pstate)
-			return;
-	}
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
-
 	cpu->pstate.current_pstate = pstate;
+}
 
-	pstate_funcs.set(cpu, pstate);
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+	int pstate = cpu->pstate.min_pstate;
+
+	intel_pstate_record_pstate(cpu, pstate);
+	/*
+	 * Generally, there is no guarantee that this code will always run on
+	 * the CPU being updated, so force the register update to run on the
+	 * right CPU.
+	 */
+	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+		      pstate_funcs.get_val(cpu, pstate));
 }
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -870,7 +867,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+
+	intel_pstate_set_min_pstate(cpu);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -997,6 +995,21 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
 }
 
+static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+	int max_perf, min_perf;
+
+	update_turbo_state();
+
+	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+	pstate = clamp_t(int, pstate, min_perf, max_perf);
+	if (pstate == cpu->pstate.current_pstate)
+		return;
+
+	intel_pstate_record_pstate(cpu, pstate);
+	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+}
+
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
 	int from, target_pstate;
@@ -1006,7 +1019,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 
 	target_pstate = pstate_funcs.get_target_pstate(cpu);
 
-	intel_pstate_set_pstate(cpu, target_pstate, true);
+	intel_pstate_update_pstate(cpu, target_pstate);
 
 	sample = &cpu->sample;
 	trace_pstate_sample(fp_toint(sample->core_pct_busy),
@@ -1180,7 +1193,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 	if (hwp_active)
 		return;
 
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+	intel_pstate_set_min_pstate(cpu);
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1255,7 +1268,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_min = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.get_scaling = funcs->get_scaling;
-	pstate_funcs.set = funcs->set;
+	pstate_funcs.get_val = funcs->get_val;
 	pstate_funcs.get_vid = funcs->get_vid;
 	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
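The intel_pstate refactor above separates "compute the PERF_CTL value" (the new get_val() callbacks) from "write the MSR", so each call site can pick the right write primitive: wrmsrl() where the code is known to run on the target CPU (the hot update path), wrmsrl_on_cpu() where it may not (init and stop paths). The resulting pattern, distilled:

	u64 val = pstate_funcs.get_val(cpu, pstate);

	/* hot path, always runs on cpu->cpu: */
	wrmsrl(MSR_IA32_PERF_CTL, val);

	/* init/stop paths, may run anywhere, so force an IPI: */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, val);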
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 50bf12033bbc..39ac78c94be0 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -44,7 +44,6 @@
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
-static unsigned int *core_to_chip_map;
 
 static const char * const throttle_reason[] = {
 	"No throttling",
@@ -55,6 +54,16 @@ static const char * const throttle_reason[] = {
 	"OCC Reset"
 };
 
+enum throttle_reason_type {
+	NO_THROTTLE = 0,
+	POWERCAP,
+	CPU_OVERTEMP,
+	POWER_SUPPLY_FAILURE,
+	OVERCURRENT,
+	OCC_RESET_THROTTLE,
+	OCC_MAX_REASON
+};
+
 static struct chip {
 	unsigned int id;
 	bool throttled;
@@ -62,9 +71,13 @@ static struct chip {
 	u8 throttle_reason;
 	cpumask_t mask;
 	struct work_struct throttle;
+	int throttle_turbo;
+	int throttle_sub_turbo;
+	int reason[OCC_MAX_REASON];
 } *chips;
 
 static int nr_chips;
+static DEFINE_PER_CPU(struct chip *, chip_info);
 
 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -196,6 +209,42 @@ static struct freq_attr *powernv_cpu_freq_attr[] = {
 	NULL,
 };
 
+#define throttle_attr(name, member)					\
+static ssize_t name##_show(struct cpufreq_policy *policy, char *buf)	\
+{									\
+	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
+									\
+	return sprintf(buf, "%u\n", chip->member);			\
+}									\
+									\
+static struct freq_attr throttle_attr_##name = __ATTR_RO(name)		\
+
+throttle_attr(unthrottle, reason[NO_THROTTLE]);
+throttle_attr(powercap, reason[POWERCAP]);
+throttle_attr(overtemp, reason[CPU_OVERTEMP]);
+throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
+throttle_attr(overcurrent, reason[OVERCURRENT]);
+throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
+throttle_attr(turbo_stat, throttle_turbo);
+throttle_attr(sub_turbo_stat, throttle_sub_turbo);
+
+static struct attribute *throttle_attrs[] = {
+	&throttle_attr_unthrottle.attr,
+	&throttle_attr_powercap.attr,
+	&throttle_attr_overtemp.attr,
+	&throttle_attr_supply_fault.attr,
+	&throttle_attr_overcurrent.attr,
+	&throttle_attr_occ_reset.attr,
+	&throttle_attr_turbo_stat.attr,
+	&throttle_attr_sub_turbo_stat.attr,
+	NULL,
+};
+
+static const struct attribute_group throttle_attr_grp = {
+	.name	= "throttle_stats",
+	.attrs	= throttle_attrs,
+};
+
 /* Helper routines */
 
 /* Access helpers to power mgt SPR */
@@ -324,34 +373,35 @@ static inline unsigned int get_nominal_index(void)
 
 static void powernv_cpufreq_throttle_check(void *data)
 {
+	struct chip *chip;
 	unsigned int cpu = smp_processor_id();
-	unsigned int chip_id = core_to_chip_map[cpu_core_index_of_thread(cpu)];
 	unsigned long pmsr;
-	int pmsr_pmax, i;
+	int pmsr_pmax;
 
 	pmsr = get_pmspr(SPRN_PMSR);
-
-	for (i = 0; i < nr_chips; i++)
-		if (chips[i].id == chip_id)
-			break;
+	chip = this_cpu_read(chip_info);
 
 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
 	if (pmsr_pmax != powernv_pstate_info.max) {
-		if (chips[i].throttled)
+		if (chip->throttled)
 			goto next;
-		chips[i].throttled = true;
-		if (pmsr_pmax < powernv_pstate_info.nominal)
+		chip->throttled = true;
+		if (pmsr_pmax < powernv_pstate_info.nominal) {
 			pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
-				     cpu, chips[i].id, pmsr_pmax,
+				     cpu, chip->id, pmsr_pmax,
 				     powernv_pstate_info.nominal);
-		trace_powernv_throttle(chips[i].id,
-				      throttle_reason[chips[i].throttle_reason],
+			chip->throttle_sub_turbo++;
+		} else {
+			chip->throttle_turbo++;
+		}
+		trace_powernv_throttle(chip->id,
+				      throttle_reason[chip->throttle_reason],
 				      pmsr_pmax);
-	} else if (chips[i].throttled) {
-		chips[i].throttled = false;
-		trace_powernv_throttle(chips[i].id,
-				      throttle_reason[chips[i].throttle_reason],
+	} else if (chip->throttled) {
+		chip->throttled = false;
+		trace_powernv_throttle(chip->id,
+				      throttle_reason[chip->throttle_reason],
 				      pmsr_pmax);
 	}
 
@@ -411,6 +461,21 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	for (i = 0; i < threads_per_core; i++)
 		cpumask_set_cpu(base + i, policy->cpus);
 
+	if (!policy->driver_data) {
+		int ret;
+
+		ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
+		if (ret) {
+			pr_info("Failed to create throttle stats directory for cpu %d\n",
+				policy->cpu);
+			return ret;
+		}
+		/*
+		 * policy->driver_data is used as a flag for one-time
+		 * creation of throttle sysfs files.
+		 */
+		policy->driver_data = policy;
+	}
 	return cpufreq_table_validate_and_show(policy, powernv_freqs);
 }
 
@@ -517,8 +582,10 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
 			break;
 
 		if (omsg.throttle_status >= 0 &&
-		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
 			chips[i].throttle_reason = omsg.throttle_status;
+			chips[i].reason[omsg.throttle_status]++;
+		}
 
 		if (!omsg.throttle_status)
 			chips[i].restore = true;
@@ -558,47 +625,34 @@ static int init_chip_info(void)
 	unsigned int chip[256];
 	unsigned int cpu, i;
 	unsigned int prev_chip_id = UINT_MAX;
-	cpumask_t cpu_mask;
-	int ret = -ENOMEM;
-
-	core_to_chip_map = kcalloc(cpu_nr_cores(), sizeof(unsigned int),
-				   GFP_KERNEL);
-	if (!core_to_chip_map)
-		goto out;
 
-	cpumask_copy(&cpu_mask, cpu_possible_mask);
-	for_each_cpu(cpu, &cpu_mask) {
+	for_each_possible_cpu(cpu) {
 		unsigned int id = cpu_to_chip_id(cpu);
 
 		if (prev_chip_id != id) {
 			prev_chip_id = id;
			chip[nr_chips++] = id;
 		}
-		core_to_chip_map[cpu_core_index_of_thread(cpu)] = id;
-		cpumask_andnot(&cpu_mask, &cpu_mask, cpu_sibling_mask(cpu));
 	}
 
 	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
 	if (!chips)
-		goto free_chip_map;
+		return -ENOMEM;
 
 	for (i = 0; i < nr_chips; i++) {
 		chips[i].id = chip[i];
 		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
 		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		for_each_cpu(cpu, &chips[i].mask)
+			per_cpu(chip_info, cpu) = &chips[i];
 	}
 
 	return 0;
-
-free_chip_map:
-	kfree(core_to_chip_map);
-out:
-	return ret;
 }
 
 static inline void clean_chip_info(void)
 {
 	kfree(chips);
-	kfree(core_to_chip_map);
 }
 
 static inline void unregister_all_notifiers(void)
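The powernv rework above replaces a per-interrupt linear scan of chips[] with a per-CPU pointer filled in once at init, distilled:

	static DEFINE_PER_CPU(struct chip *, chip_info);

	/* init_chip_info(), once per boot: */
	for_each_cpu(cpu, &chips[i].mask)
		per_cpu(chip_info, cpu) = &chips[i];

	/* throttle check, runs on the affected CPU: O(1) lookup */
	struct chip *chip = this_cpu_read(chip_info);

This also lets the driver drop core_to_chip_map and the sibling-mask bookkeeping entirely.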
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 27fc733cb5b9..03d38c291de6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -196,7 +196,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
  * of points is below a threshold. If it is... then use the
  * average of these 8 points as the estimated value.
  */
-static void get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data)
 {
 	int i, divisor;
 	unsigned int max, thresh, avg;
@@ -253,9 +253,7 @@ again:
 	if (likely(variance <= U64_MAX/36)) {
 		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3)) || variance <= 400) {
-			if (data->next_timer_us > avg)
-				data->predicted_us = avg;
-			return;
+			return avg;
 		}
 	}
 
@@ -269,7 +267,7 @@ again:
 	 * with sporadic activity with a bunch of short pauses.
 	 */
 	if ((divisor * 4) <= INTERVALS * 3)
-		return;
+		return UINT_MAX;
 
 	thresh = max - 1;
 	goto again;
@@ -286,6 +284,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 	unsigned int interactivity_req;
+	unsigned int expected_interval;
 	unsigned long nr_iowaiters, cpu_load;
 
 	if (data->needs_update) {
@@ -312,32 +311,43 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 					 data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
-	get_typical_interval(data);
-
-	/*
-	 * Performance multiplier defines a minimum predicted idle
-	 * duration / latency ratio. Adjust the latency limit if
-	 * necessary.
-	 */
-	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
-	if (latency_req > interactivity_req)
-		latency_req = interactivity_req;
+	expected_interval = get_typical_interval(data);
+	expected_interval = min(expected_interval, data->next_timer_us);
 
 	if (CPUIDLE_DRIVER_STATE_START > 0) {
-		data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+		struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+		unsigned int polling_threshold;
+
 		/*
 		 * We want to default to C1 (hlt), not to busy polling
-		 * unless the timer is happening really really soon.
+		 * unless the timer is happening really really soon, or
+		 * C1's exit latency exceeds the user configured limit.
 		 */
-		if (interactivity_req > 20 &&
-		    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
-			dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
+		polling_threshold = max_t(unsigned int, 20, s->target_residency);
+		if (data->next_timer_us > polling_threshold &&
+		    latency_req > s->exit_latency && !s->disabled &&
+		    !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
 			data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+		else
+			data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
 	} else {
 		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 	}
 
 	/*
+	 * Use the lowest expected idle interval to pick the idle state.
+	 */
+	data->predicted_us = min(data->predicted_us, expected_interval);
+
+	/*
+	 * Use the performance multiplier and the user-configurable
+	 * latency_req to determine the maximum exit latency.
+	 */
+	interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+	if (latency_req > interactivity_req)
+		latency_req = interactivity_req;
+
+	/*
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
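After this change the menu governor derives its prediction from two independent bounds instead of mutating predicted_us inside get_typical_interval(); distilled from the hunks above (names as in the driver):

	expected_interval = min(get_typical_interval(data),
				data->next_timer_us);
	data->predicted_us = min(data->predicted_us, expected_interval);
	interactivity_req = data->predicted_us /
			    performance_multiplier(nr_iowaiters, cpu_load);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;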
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 64281bb2f650..4de78c552251 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -61,7 +61,7 @@ config DEVFREQ_GOV_USERSPACE
 	  Sets the frequency at the user specified one.
 	  This governor returns the user configured frequency if there
 	  has been an input to /sys/devices/.../power/devfreq_set_freq.
-	  Otherwise, the governor does not change the frequnecy
+	  Otherwise, the governor does not change the frequency
 	  given at the initialization.
 
 comment "DEVFREQ Drivers"
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9810d1df0691..4a2c07ee6677 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
 	struct dma_buf *dmabuf;
 	struct dma_buf_sync sync;
 	enum dma_data_direction direction;
+	int ret;
 
 	dmabuf = file->private_data;
 
@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
 		}
 
 		if (sync.flags & DMA_BUF_SYNC_END)
-			dma_buf_end_cpu_access(dmabuf, direction);
+			ret = dma_buf_end_cpu_access(dmabuf, direction);
 		else
-			dma_buf_begin_cpu_access(dmabuf, direction);
+			ret = dma_buf_begin_cpu_access(dmabuf, direction);
 
-		return 0;
+		return ret;
 	default:
 		return -ENOTTY;
 	}
@@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  * @dmabuf:	[in]	buffer to complete cpu access for.
  * @direction:	[in]	length of range for cpu access.
  *
- * This call must always succeed.
+ * Can return negative error values, returns 0 on success.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-			    enum dma_data_direction direction)
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+			   enum dma_data_direction direction)
 {
+	int ret = 0;
+
 	WARN_ON(!dmabuf);
 
 	if (dmabuf->ops->end_cpu_access)
-		dmabuf->ops->end_cpu_access(dmabuf, direction);
+		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index dc6874424188..6b816878e5e7 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -16,7 +16,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "virt-dma.h"
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 8a46077129ac..631c977b0da5 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -446,14 +446,16 @@ static void
 bus_reset_irq_handler(struct pcilynx *lynx)
 {
 	struct client *client;
-	struct timeval tv;
+	struct timespec64 ts64;
+	u32 timestamp;
 
-	do_gettimeofday(&tv);
+	ktime_get_real_ts64(&ts64);
+	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
 
 	spin_lock(&lynx->client_list_lock);
 
 	list_for_each_entry(client, &lynx->client_list, link)
-		packet_buffer_put(&client->buffer, &tv.tv_usec, 4);
+		packet_buffer_put(&client->buffer, &timestamp, 4);
 
 	spin_unlock(&lynx->client_list_lock);
 }
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a68e199d579d..c5c9599a3a71 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -37,7 +37,6 @@ struct men_z127_gpio {
 	void __iomem *reg_base;
 	struct mcb_device *mdev;
 	struct resource *mem;
-	spinlock_t lock;
 };
 
 static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
@@ -69,7 +68,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
 		debounce /= 50;
 	}
 
-	spin_lock(&priv->lock);
+	spin_lock(&gc->bgpio_lock);
 
 	db_en = readl(priv->reg_base + MEN_Z127_DBER);
 
@@ -84,7 +83,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
 	writel(db_en, priv->reg_base + MEN_Z127_DBER);
 	writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
 
-	spin_unlock(&priv->lock);
+	spin_unlock(&gc->bgpio_lock);
 
 	return 0;
 }
@@ -97,7 +96,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
 	if (gpio_pin >= gc->ngpio)
 		return -EINVAL;
 
-	spin_lock(&priv->lock);
+	spin_lock(&gc->bgpio_lock);
 	od_en = readl(priv->reg_base + MEN_Z127_ODER);
 
 	if (gpiochip_line_is_open_drain(gc, gpio_pin))
@@ -106,7 +105,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
 		od_en &= ~BIT(gpio_pin);
 
 	writel(od_en, priv->reg_base + MEN_Z127_ODER);
-	spin_unlock(&priv->lock);
+	spin_unlock(&gc->bgpio_lock);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index c0aa387664bf..0dc916191689 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
 							resource_size(res));
 	if (!gpio->base) {
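The gpio-xgene hunk above adds the missing NULL check after platform_get_resource(). An alternative (not what this patch does) is devm_ioremap_resource(), which validates the resource, requests the region, and maps it in one step, returning an ERR_PTR() instead of NULL on failure:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpio->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(gpio->base))
		return PTR_ERR(gpio->base);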
support" + bool "Enable AMD Audio CoProcessor IP support" select MFD_CORE select PM_GENERIC_DOMAINS if PM help Choose this option to enable ACP IP support for AMD SOCs. + This adds the ACP (Audio CoProcessor) IP driver and wires + it up into the amdgpu driver. The ACP block provides the DMA + engine for the i2s-based ALSA driver. It is required for audio + on APUs which utilize an i2s codec. endmenu diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index d7ec9bd6755f..9f4a45cd2aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -48,7 +48,8 @@ struct amdgpu_mn { /* protected by adev->mn_lock */ struct hlist_node node; - /* objects protected by mm->mmap_sem */ + /* objects protected by lock */ + struct mutex lock; struct rb_root objects; }; @@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) struct amdgpu_bo *bo, *next_bo; mutex_lock(&adev->mn_lock); - down_write(&rmn->mm->mmap_sem); + mutex_lock(&rmn->lock); hash_del(&rmn->node); rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects, it.rb) { @@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work) } kfree(node); } - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); mutex_unlock(&adev->mn_lock); mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm); kfree(rmn); @@ -105,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn, } /** + * amdgpu_mn_invalidate_node - unmap all BOs of a node + * + * @node: the node with the BOs to unmap + * + * We block for all BOs and unmap them by move them + * into system domain again. + */ +static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, + unsigned long start, + unsigned long end) +{ + struct amdgpu_bo *bo; + long r; + + list_for_each_entry(bo, &node->bos, mn_list) { + + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) + continue; + + r = amdgpu_bo_reserve(bo, true); + if (r) { + DRM_ERROR("(%ld) failed to reserve user bo\n", r); + continue; + } + + r = reservation_object_wait_timeout_rcu(bo->tbo.resv, + true, false, MAX_SCHEDULE_TIMEOUT); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); + + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (r) + DRM_ERROR("(%ld) failed to validate user bo\n", r); + + amdgpu_bo_unreserve(bo); + } +} + +/** + * amdgpu_mn_invalidate_page - callback to notify about mm change + * + * @mn: our notifier + * @mn: the mm this callback is about + * @address: address of invalidate page + * + * Invalidation of a single page. Blocks for all BOs mapping it + * and unmap them by move them into system domain again. 
+ */ +static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); + struct interval_tree_node *it; + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, address, address); + if (it) { + struct amdgpu_mn_node *node; + + node = container_of(it, struct amdgpu_mn_node, it); + amdgpu_mn_invalidate_node(node, address, address); + } + + mutex_unlock(&rmn->lock); +} + +/** * amdgpu_mn_invalidate_range_start - callback to notify about mm change * * @mn: our notifier @@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, /* notification is exclusive, but interval is inclusive */ end -= 1; + mutex_lock(&rmn->lock); + it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct amdgpu_mn_node *node; - struct amdgpu_bo *bo; - long r; node = container_of(it, struct amdgpu_mn_node, it); it = interval_tree_iter_next(it, start, end); - list_for_each_entry(bo, &node->bos, mn_list) { - - if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, - end)) - continue; - - r = amdgpu_bo_reserve(bo, true); - if (r) { - DRM_ERROR("(%ld) failed to reserve user bo\n", r); - continue; - } - - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, - true, false, MAX_SCHEDULE_TIMEOUT); - if (r <= 0) - DRM_ERROR("(%ld) failed to wait for user bo\n", r); - - amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (r) - DRM_ERROR("(%ld) failed to validate user bo\n", r); - - amdgpu_bo_unreserve(bo); - } + amdgpu_mn_invalidate_node(node, start, end); } + + mutex_unlock(&rmn->lock); } static const struct mmu_notifier_ops amdgpu_mn_ops = { .release = amdgpu_mn_release, + .invalidate_page = amdgpu_mn_invalidate_page, .invalidate_range_start = amdgpu_mn_invalidate_range_start, }; @@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->adev = adev; rmn->mm = mm; rmn->mn.ops = &amdgpu_mn_ops; + mutex_init(&rmn->lock); rmn->objects = RB_ROOT; r = __mmu_notifier_register(&rmn->mn, mm); @@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) INIT_LIST_HEAD(&bos); - down_write(&rmn->mm->mmap_sem); + mutex_lock(&rmn->lock); while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { kfree(node); @@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) if (!node) { node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); if (!node) { - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); return -ENOMEM; } } @@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) interval_tree_insert(&node->it, &rmn->objects); - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); return 0; } @@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) return; } - down_write(&rmn->mm->mmap_sem); + mutex_lock(&rmn->lock); /* save the next list entry for later */ head = bo->mn_list.next; @@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) kfree(node); } - up_write(&rmn->mm->mmap_sem); + mutex_unlock(&rmn->lock); mutex_unlock(&adev->mn_lock); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 151a2d42c639..56d1458393cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -608,6 +608,10 @@ int 
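The amdgpu_mn rework above stops (ab)using mm->mmap_sem to protect the driver's interval tree: MMU-notifier callbacks are invoked by the core VM with mmap_sem already held, so the driver now guards the tree with its own rmn->lock in both directions. The resulting callback shape, distilled:

	mutex_lock(&rmn->lock);
	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node =
			container_of(it, struct amdgpu_mn_node, it);

		it = interval_tree_iter_next(it, start, end);
		amdgpu_mn_invalidate_node(node, start, end);
	}
	mutex_unlock(&rmn->lock);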
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 151a2d42c639..56d1458393cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -608,6 +608,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	if ((offset + size) <= adev->mc.visible_vram_size)
 		return 0;
 
+	/* Can't move a pinned BO to visible VRAM */
+	if (abo->pin_count > 0)
+		return -EINVAL;
+
 	/* hurrah the memory is not visible ! */
 	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab34190859a8..f1a55d1888cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -384,9 +384,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 			struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int r;
 
+	/* Can't move a pinned BO */
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	if (WARN_ON_ONCE(abo->pin_count > 0))
+		return -EINVAL;
+
 	adev = amdgpu_get_adev(bo->bdev);
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		amdgpu_move_null(bo, new_mem);
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index e195bf59da86..043e6ebab575 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,17 +1,17 @@
 
 subdir-ccflags-y += -Iinclude/drm \
-		-Idrivers/gpu/drm/amd/powerplay/inc/ \
-		-Idrivers/gpu/drm/amd/include/asic_reg \
-		-Idrivers/gpu/drm/amd/include \
-		-Idrivers/gpu/drm/amd/powerplay/smumgr\
-		-Idrivers/gpu/drm/amd/powerplay/hwmgr \
-		-Idrivers/gpu/drm/amd/powerplay/eventmgr
+		-I$(FULL_AMD_PATH)/powerplay/inc/ \
+		-I$(FULL_AMD_PATH)/include/asic_reg \
+		-I$(FULL_AMD_PATH)/include \
+		-I$(FULL_AMD_PATH)/powerplay/smumgr\
+		-I$(FULL_AMD_PATH)/powerplay/hwmgr \
+		-I$(FULL_AMD_PATH)/powerplay/eventmgr
 
 AMD_PP_PATH = ../powerplay
 
 PP_LIBS = smumgr hwmgr eventmgr
 
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
 
 include $(AMD_POWERPLAY)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 34f4bef3691f..b156481b50e8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(
 
 	hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
 
-	if (NULL == hwmgr->dyn_state.cac_dtp_table)
+	if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+		kfree(tdp_table);
 		return -ENOMEM;
+	}
 
 	memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 1ffe9c329c46..d65dcaee3832 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 	if (!state->base.crtc || !fb)
 		return 0;
 
-	crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
+	crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
 	mode = &crtc_state->adjusted_mode;
 
 	state->src_x = s->src_x;
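Several hunks in this series (atmel-hlcdc above, drm_atomic_helper.c below) replace open-coded state->crtc_states[drm_crtc_index(crtc)] lookups with the helper from drm_atomic.h, which at this point is essentially:

	static inline struct drm_crtc_state *
	drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc)
	{
		return state->crtc_states[drm_crtc_index(crtc)];
	}

Unlike drm_atomic_get_crtc_state(), it never allocates: it returns NULL when the CRTC is not part of the update, which the callers then WARN on where appropriate.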
drm_atomic_replace_property_blob - replace a blob property * @blob: a pointer to the member blob to be replaced * @new_blob: the new blob to replace with - * @expected_size: the expected size of the new blob * @replaced: whether the blob has been replaced * * RETURNS: diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2bb90faa0ee2..4befe25c81c7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; if (plane->state->crtc) { - crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(state, + plane->state->crtc); if (WARN_ON(!crtc_state)) return; @@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, } if (plane_state->crtc) { - crtc_state = - state->crtc_states[drm_crtc_index(plane_state->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(state, + plane_state->crtc); if (WARN_ON(!crtc_state)) return; @@ -374,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state) if (!conn_state->crtc || !conn_state->best_encoder) continue; - crtc_state = - state->crtc_states[drm_crtc_index(conn_state->crtc)]; + crtc_state = drm_atomic_get_existing_crtc_state(state, + conn_state->crtc); /* * Each encoder has at most one connector (since we always steal @@ -679,7 +680,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (!old_conn_state->crtc) continue; - old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; + old_crtc_state = drm_atomic_get_existing_crtc_state(old_state, + old_conn_state->crtc); if (!old_crtc_state->active || !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 7d58f594cffe..df64ed1c0139 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, { struct drm_dp_aux_msg msg; unsigned int retry; - int err; + int err = 0; memset(&msg, 0, sizeof(msg)); msg.address = offset; @@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, msg.buffer = buffer; msg.size = size; + mutex_lock(&aux->hw_mutex); + /* * The specification doesn't give any recommendation on how often to * retry native transactions. 
We used to retry 7 times like for @@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, */ for (retry = 0; retry < 32; retry++) { - mutex_lock(&aux->hw_mutex); err = aux->transfer(aux, &msg); - mutex_unlock(&aux->hw_mutex); if (err < 0) { if (err == -EBUSY) continue; - return err; + goto unlock; } switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { case DP_AUX_NATIVE_REPLY_ACK: if (err < size) - return -EPROTO; - return err; + err = -EPROTO; + goto unlock; case DP_AUX_NATIVE_REPLY_NACK: - return -EIO; + err = -EIO; + goto unlock; case DP_AUX_NATIVE_REPLY_DEFER: usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); @@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, } DRM_DEBUG_KMS("too many retries, giving up\n"); - return -EIO; + err = -EIO; + +unlock: + mutex_unlock(&aux->hw_mutex); + return err; } /** @@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { - mutex_lock(&aux->hw_mutex); ret = aux->transfer(aux, msg); - mutex_unlock(&aux->hw_mutex); if (ret < 0) { if (ret == -EBUSY) continue; @@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, memset(&msg, 0, sizeof(msg)); + mutex_lock(&aux->hw_mutex); + for (i = 0; i < num; i++) { msg.address = msgs[i].addr; drm_dp_i2c_msg_set_request(&msg, &msgs[i]); @@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, msg.size = 0; (void)drm_dp_i2c_do_msg(aux, &msg); + mutex_unlock(&aux->hw_mutex); + return err; } diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 1f3eef6fb345..0506016e18e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire return ret; } -static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) +static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - bool was_interruptible; int ret; - mutex_lock(&dev->struct_mutex); - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; ret = i915_gem_object_set_to_gtt_domain(obj, false); - - dev_priv->mm.interruptible = was_interruptible; mutex_unlock(&dev->struct_mutex); - if (unlikely(ret)) - DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n"); + return ret; } static const struct dma_buf_ops i915_dmabuf_ops = { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index b04a64664673..65428cf233ce 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void); int msm_hdmi_pll_8960_init(struct platform_device *pdev); int msm_hdmi_pll_8996_init(struct platform_device *pdev); #else -static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev); +static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev) { return -ENODEV; } diff --git 
a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d52910e2c26c..c03b96709179 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file) struct msm_file_private *ctx = file->driver_priv; struct msm_kms *kms = priv->kms; - if (kms) - kms->funcs->preclose(kms, file); - mutex_lock(&dev->struct_mutex); if (ctx == priv->lastctx) priv->lastctx = NULL; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 9bcabaada179..e32222c3d44f 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -55,7 +55,6 @@ struct msm_kms_funcs { struct drm_encoder *slave_encoder, bool is_cmd_mode); /* cleanup: */ - void (*preclose)(struct msm_kms *kms, struct drm_file *file); void (*destroy)(struct msm_kms *kms); }; diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 3cf8aab23a39..af267c35d813 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -97,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, return omap_gem_get_pages(obj, &pages, true); } -static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, - enum dma_data_direction dir) +static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, + enum dma_data_direction dir) { struct drm_gem_object *obj = buffer->priv; omap_gem_put_pages(obj); + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index df7a1719c841..43cffb526b0c 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -510,6 +510,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder, { struct radeon_encoder_mst *mst_enc; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_connector_atom_dig *dig_connector; int bpp = 24; mst_enc = radeon_encoder->enc_priv; @@ -523,22 +524,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); - { - struct radeon_connector_atom_dig *dig_connector; - int ret; - - dig_connector = mst_enc->connector->con_priv; - ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base, - dig_connector->dpcd, adjusted_mode->clock, - &dig_connector->dp_lane_count, - &dig_connector->dp_clock); - if (ret) { - dig_connector->dp_lane_count = 0; - dig_connector->dp_clock = 0; - } - DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, - dig_connector->dp_lane_count, dig_connector->dp_clock); - } + dig_connector = mst_enc->connector->con_priv; + dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); + dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd); + DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, + dig_connector->dp_lane_count, dig_connector->dp_clock); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index dd46c38676db..2d901bf28a94 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) if ((offset + size) <= rdev->mc.visible_vram_size) return 0; + /* Can't move a pinned BO to visible VRAM */ + if (rbo->pin_count > 0) + return -EINVAL; + /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 6d8c32377c6f..c008312e1bcd 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -397,9 +397,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; + struct radeon_bo *rbo; struct ttm_mem_reg *old_mem = &bo->mem; int r; + /* Can't move a pinned BO */ + rbo = container_of(bo, struct radeon_bo, tbo); + if (WARN_ON_ONCE(rbo->pin_count > 0)) + return -EINVAL; + rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { radeon_move_null(bo, new_mem); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index cb75ab72098a..af4df81c4e0c 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, { 0, 0, 0, 0 }, }; @@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, } ++p; } + /* limit mclk on all R7 370 parts for stability */ + if (rdev->pdev->device == 0x6811 && + rdev->pdev->revision == 0x81) + max_mclk = 120000; if (rps->vce_active) { rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 3d3cf2f8891e..d5cfef75fc80 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, if (!iores) return -ENXIO; - platform_set_drvdata(pdev, hdmi); - encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); /* * If we failed to find the CRTC(s) which this encoder is @@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); - return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); + ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); + + /* + * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(), + * which would have called the encoder cleanup. Do it manually. 
+ */ + if (ret) + drm_encoder_cleanup(encoder); + + return ret; } static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 896da09e49ee..f556a8f4fde6 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev) return 0; } +static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, + struct drm_file *file_priv) +{ + struct rockchip_drm_private *priv = crtc->dev->dev_private; + int pipe = drm_crtc_index(crtc); + + if (pipe < ROCKCHIP_MAX_CRTC && + priv->crtc_funcs[pipe] && + priv->crtc_funcs[pipe]->cancel_pending_vblank) + priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv); +} + +static void rockchip_drm_preclose(struct drm_device *dev, + struct drm_file *file_priv) +{ + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv); +} + void rockchip_drm_lastclose(struct drm_device *dev) { struct rockchip_drm_private *priv = dev->dev_private; @@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = { DRIVER_PRIME | DRIVER_ATOMIC, .load = rockchip_drm_load, .unload = rockchip_drm_unload, + .preclose = rockchip_drm_preclose, .lastclose = rockchip_drm_lastclose, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rockchip_drm_crtc_enable_vblank, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 3529f692edb8..00d17d71aa4c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -40,6 +40,7 @@ struct rockchip_crtc_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); void (*wait_for_update)(struct drm_crtc *crtc); + void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv); }; struct rockchip_atomic_commit { diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index fd370548d7d7..a619f120f801 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -499,10 +499,25 @@ err_disable_hclk: static void vop_crtc_disable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); + int i; if (!vop->is_enabled) return; + /* + * We need to make sure that all windows are disabled before we + * disable that crtc. Otherwise we might try to scan from a destroyed + * buffer later. + */ + for (i = 0; i < vop->data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + const struct vop_win_data *win = vop_win->data; + + spin_lock(&vop->reg_lock); + VOP_WIN_SET(vop, win, enable, 0); + spin_unlock(&vop->reg_lock); + } + drm_crtc_vblank_off(crtc); /* @@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; struct drm_framebuffer *fb = state->fb; struct vop_win *vop_win = to_vop_win(plane); struct vop_plane_state *vop_plane_state = to_vop_plane_state(state); @@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane, int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : DRM_PLANE_HELPER_NO_SCALING; - crtc = crtc ? crtc : plane->state->crtc; - /* - * Both crtc or plane->state->crtc can be null. 
- */ if (!crtc || !fb) goto out_disable; + + crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + src->x1 = state->src_x; src->y1 = state->src_y; src->x2 = state->src_x + state->src_w; @@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane, clip.x1 = 0; clip.y1 = 0; - clip.x2 = crtc->mode.hdisplay; - clip.y2 = crtc->mode.vdisplay; + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; ret = drm_plane_helper_check_update(plane, crtc, state->fb, src, dest, &clip, @@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc) WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); } +static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc, + struct drm_file *file_priv) +{ + struct drm_device *drm = crtc->dev; + struct vop *vop = to_vop(crtc); + struct drm_pending_vblank_event *e; + unsigned long flags; + + spin_lock_irqsave(&drm->event_lock, flags); + e = vop->event; + if (e && e->base.file_priv == file_priv) { + vop->event = NULL; + + e->base.destroy(&e->base); + file_priv->event_space += sizeof(e->event); + } + spin_unlock_irqrestore(&drm->event_lock, flags); +} + static const struct rockchip_crtc_funcs private_crtc_funcs = { .enable_vblank = vop_crtc_enable_vblank, .disable_vblank = vop_crtc_disable_vblank, .wait_for_update = vop_crtc_wait_for_update, + .cancel_pending_vblank = vop_crtc_cancel_pending_vblank, }; static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, @@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, { struct vop *vop = to_vop(crtc); - if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0) - return false; - adjusted_mode->clock = clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; @@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop) const struct vop_data *vop_data = vop->data; struct device *dev = vop->dev; struct drm_device *drm_dev = vop->drm_dev; - struct drm_plane *primary = NULL, *cursor = NULL, *plane; + struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp; struct drm_crtc *crtc = &vop->crtc; struct device_node *port; int ret; @@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop) ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, &vop_crtc_funcs, NULL); if (ret) - return ret; + goto err_cleanup_planes; drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); @@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop) if (!port) { DRM_ERROR("no port node found in %s\n", dev->of_node->full_name); + ret = -ENOENT; goto err_cleanup_crtc; } @@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop) err_cleanup_crtc: drm_crtc_cleanup(crtc); err_cleanup_planes: - list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head) + list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, + head) drm_plane_cleanup(plane); return ret; } @@ -1202,9 +1238,28 @@ err_cleanup_planes: static void vop_destroy_crtc(struct vop *vop) { struct drm_crtc *crtc = &vop->crtc; + struct drm_device *drm_dev = vop->drm_dev; + struct drm_plane *plane, *tmp; rockchip_unregister_crtc_funcs(crtc); of_node_put(crtc->port); + + /* + * We need to cleanup the planes now. Why? + * + * The planes are "&vop->win[i].base". That means the memory is + * all part of the big "struct vop" chunk of memory. That memory + * was devm allocated and associated with this component. We need to + * free it ourselves before vop_unbind() finishes. 
+ */ + list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, + head) + vop_plane_destroy(plane); + + /* + * Destroy CRTC after vop_plane_destroy() since vop_disable_plane() + * references the CRTC. + */ drm_crtc_cleanup(crtc); } diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index c427499133d6..fd1eb9d03f0b 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, } if (ufb->obj->base.import_attach) { - dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, - DMA_FROM_DEVICE); + ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf, + DMA_FROM_DEVICE); } unlock: @@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_gfree: - drm_gem_object_unreference(&ufbdev->ufb.obj->base); + drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); out: return ret; } diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 2a0a784ab6ee..d7528e0d8442 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file, return ret; } - drm_gem_object_unreference(&obj->base); + drm_gem_object_unreference_unlocked(&obj->base); *handle_p = handle; return 0; } diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c index 36544c4f653c..303d0c9df907 100644 --- a/drivers/hwmon/max1111.c +++ b/drivers/hwmon/max1111.c @@ -85,6 +85,9 @@ static struct max1111_data *the_max1111; int max1111_read_channel(int channel) { + if (!the_max1111 || !the_max1111->spi) + return -ENODEV; + return max1111_read(&the_max1111->spi->dev, channel); } EXPORT_SYMBOL(max1111_read_channel); @@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi) { struct max1111_data *data = spi_get_drvdata(spi); +#ifdef CONFIG_SHARPSL_PM + the_max1111 = NULL; +#endif hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 9f0a48e39b8a..80e933b296f6 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -451,7 +451,7 @@ err_free: return ret; } -static const struct ide_port_info icside_v6_port_info __initconst = { +static const struct ide_port_info icside_v6_port_info = { .init_dma = icside_dma_off_init, .port_ops = &icside_v6_no_dma_port_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index 8012e43bf8f6..46427ea01753 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c @@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) clk_enable(clk); rate = clk_get_rate(clk); + if (!rate) + return -EINVAL; /* NOTE: round *down* to meet minimum timings; we count in clocks */ ideclk_period = 1000000000UL / rate; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index cd4510a63375..ba947df5a8c7 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -65,7 +65,7 @@ #include <asm/mwait.h> #include <asm/msr.h> -#define INTEL_IDLE_VERSION "0.4" +#define INTEL_IDLE_VERSION "0.4.1" #define PREFIX "intel_idle: " static struct cpuidle_driver intel_idle_driver = { @@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = { { .enter = NULL } }; +static struct cpuidle_state knl_cstates[] = { + { + 
.name = "C1-KNL", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 2, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze }, + { + .name = "C6-KNL", + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 120, + .target_residency = 500, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze }, + { + .enter = NULL } +}; /** * intel_idle @@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_knl = { + .state_table = knl_cstates, +}; + #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } @@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(0x56, idle_cpu_bdw), ICPU(0x4e, idle_cpu_skl), ICPU(0x5e, idle_cpu_skl), + ICPU(0x57, idle_cpu_knl), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); @@ -994,36 +1019,92 @@ static void intel_idle_cpuidle_devices_uninit(void) } /* - * intel_idle_state_table_update() + * ivt_idle_state_table_update(void) * - * Update the default state_table for this CPU-id - * - * Currently used to access tuned IVT multi-socket targets + * Tune IVT multi-socket targets * Assumption: num_sockets == (max_package_num + 1) */ -void intel_idle_state_table_update(void) +static void ivt_idle_state_table_update(void) { /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ - if (boot_cpu_data.x86_model == 0x3e) { /* IVT */ - int cpu, package_num, num_sockets = 1; - - for_each_online_cpu(cpu) { - package_num = topology_physical_package_id(cpu); - if (package_num + 1 > num_sockets) { - num_sockets = package_num + 1; - - if (num_sockets > 4) { - cpuidle_state_table = ivt_cstates_8s; - return; - } + int cpu, package_num, num_sockets = 1; + + for_each_online_cpu(cpu) { + package_num = topology_physical_package_id(cpu); + if (package_num + 1 > num_sockets) { + num_sockets = package_num + 1; + + if (num_sockets > 4) { + cpuidle_state_table = ivt_cstates_8s; + return; } } + } + + if (num_sockets > 2) + cpuidle_state_table = ivt_cstates_4s; - if (num_sockets > 2) - cpuidle_state_table = ivt_cstates_4s; - /* else, 1 and 2 socket systems use default ivt_cstates */ + /* else, 1 and 2 socket systems use default ivt_cstates */ +} +/* + * sklh_idle_state_table_update(void) + * + * On SKL-H (model 0x5e) disable C8 and C9 if: + * C10 is enabled and SGX disabled + */ +static void sklh_idle_state_table_update(void) +{ + unsigned long long msr; + unsigned int eax, ebx, ecx, edx; + + + /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */ + if (max_cstate <= 7) + return; + + /* if PC10 not present in CPUID.MWAIT.EDX */ + if ((mwait_substates & (0xF << 28)) == 0) + return; + + rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr); + + /* PC10 is not enabled in PKG C-state limit */ + if ((msr & 0xF) != 8) + return; + + ecx = 0; + cpuid(7, &eax, &ebx, &ecx, &edx); + + /* if SGX is present */ + if (ebx & (1 << 2)) { + + rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); + + /* if SGX is enabled */ + if (msr & (1 << 18)) + return; + } + + skl_cstates[5].disabled = 1; /* C8-SKL */ + skl_cstates[6].disabled = 1; /* C9-SKL */ +} +/* + * intel_idle_state_table_update() + * + * Update the default state_table for this CPU-id + */ + +static void intel_idle_state_table_update(void) +{ + switch (boot_cpu_data.x86_model) { + + case 0x3e: /* IVT */ + ivt_idle_state_table_update(); + break; + case 0x5e: /* SKL-H */ + sklh_idle_state_table_update(); + 
break; } - return; } /* @@ -1063,6 +1144,14 @@ static int __init intel_idle_cpuidle_driver_init(void) if (num_substates == 0) continue; + /* if state marked as disabled, skip it */ + if (cpuidle_state_table[cstate].disabled != 0) { + pr_debug(PREFIX "state %s is disabled", + cpuidle_state_table[cstate].name); + continue; + } + + if (((mwait_cstate + 1) > 2) && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halts in idle" diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c index 64ca7113ff28..d84d20b9cec0 100644 --- a/drivers/input/input-compat.c +++ b/drivers/input/input-compat.c @@ -17,7 +17,7 @@ int input_event_from_user(const char __user *buffer, struct input_event *event) { - if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) { + if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { struct input_event_compat compat_event; if (copy_from_user(&compat_event, buffer, @@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer, int input_event_to_user(char __user *buffer, const struct input_event *event) { - if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) { + if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { struct input_event_compat compat_event; compat_event.time.tv_sec = event->time.tv_sec; @@ -65,7 +65,7 @@ int input_event_to_user(char __user *buffer, int input_ff_effect_from_user(const char __user *buffer, size_t size, struct ff_effect *effect) { - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { struct ff_effect_compat *compat_effect; if (size != sizeof(struct ff_effect_compat)) diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h index 0f25878d5fa2..1563160a7af3 100644 --- a/drivers/input/input-compat.h +++ b/drivers/input/input-compat.h @@ -17,8 +17,6 @@ #ifdef CONFIG_COMPAT -#define INPUT_COMPAT_TEST in_compat_syscall() - struct input_event_compat { struct compat_timeval time; __u16 type; @@ -57,7 +55,7 @@ struct ff_effect_compat { static inline size_t input_event_size(void) { - return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ? + return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ? 
sizeof(struct input_event_compat) : sizeof(struct input_event); } diff --git a/drivers/input/input.c b/drivers/input/input.c index 880605959aa6..b87ffbd4547d 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1015,7 +1015,7 @@ static int input_bits_to_string(char *buf, int buf_size, { int len = 0; - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { u32 dword = bits >> 32; if (dword || !skip_empty) len += snprintf(buf, buf_size, "%x ", dword); diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index cfd58e87da26..1c5914cae853 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d ar2->udev = udev; + /* Sanity check, first interface must have an endpoint */ + if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) { + dev_err(&interface->dev, + "%s(): interface 0 must have an endpoint\n", __func__); + r = -ENODEV; + goto fail1; + } ar2->intf[0] = interface; ar2->ep[0] = &alt->endpoint[0].desc; + /* Sanity check, the device must have two interfaces */ ar2->intf[1] = usb_ifnum_to_if(udev, 1); + if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) { + dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n", + __func__, udev->actconfig->desc.bNumInterfaces); + r = -ENODEV; + goto fail1; + } + r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2); if (r) goto fail1; + + /* Sanity check, second interface must have an endpoint */ alt = ar2->intf[1]->cur_altsetting; + if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) { + dev_err(&interface->dev, + "%s(): interface 1 must have an endpoint\n", __func__); + r = -ENODEV; + goto fail2; + } ar2->ep[1] = &alt->endpoint[0].desc; r = ati_remote2_urb_init(ar2); if (r) - goto fail2; + goto fail3; ar2->channel_mask = channel_mask; ar2->mode_mask = mode_mask; r = ati_remote2_setup(ar2, ar2->channel_mask); if (r) - goto fail2; + goto fail3; usb_make_path(udev, ar2->phys, sizeof(ar2->phys)); strlcat(ar2->phys, "/input0", sizeof(ar2->phys)); @@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group); if (r) - goto fail2; + goto fail3; r = ati_remote2_input_init(ar2); if (r) - goto fail3; + goto fail4; usb_set_intfdata(interface, ar2); @@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d return 0; - fail3: + fail4: sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group); - fail2: + fail3: ati_remote2_urb_cleanup(ar2); + fail2: usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); fail1: kfree(ar2); diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index ac1fa5f44580..9c0ea36913b4 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev, union_desc->bMasterInterface0); + if (!pcu->ctrl_intf) + return -EINVAL; alt = pcu->ctrl_intf->cur_altsetting; pcu->ep_ctrl = &alt->endpoint[0].desc; @@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc pcu->data_intf = usb_ifnum_to_if(pcu->udev, union_desc->bSlaveInterface0); + if (!pcu->data_intf) + return -EINVAL; alt = pcu->data_intf->cur_altsetting; if (alt->desc.bNumEndpoints != 2) { diff --git 
a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 4eb9e4d94f46..abe1a927b332 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -664,7 +664,7 @@ struct uinput_ff_upload_compat { static int uinput_ff_upload_to_user(char __user *buffer, const struct uinput_ff_upload *ff_up) { - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { struct uinput_ff_upload_compat ff_up_compat; ff_up_compat.request_id = ff_up->request_id; @@ -695,7 +695,7 @@ static int uinput_ff_upload_to_user(char __user *buffer, static int uinput_ff_upload_from_user(const char __user *buffer, struct uinput_ff_upload *ff_up) { - if (INPUT_COMPAT_TEST) { + if (in_compat_syscall()) { struct uinput_ff_upload_compat ff_up_compat; if (copy_from_user(&ff_up_compat, buffer, diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c index 9425e0f6c5ce..fdc243ca93ed 100644 --- a/drivers/input/mouse/byd.c +++ b/drivers/input/mouse/byd.c @@ -12,10 +12,12 @@ #include <linux/input.h> #include <linux/libps2.h> #include <linux/serio.h> +#include <linux/slab.h> #include "psmouse.h" #include "byd.h" +/* PS2 Bits */ #define PS2_Y_OVERFLOW BIT_MASK(7) #define PS2_X_OVERFLOW BIT_MASK(6) #define PS2_Y_SIGN BIT_MASK(5) @@ -26,69 +28,249 @@ #define PS2_LEFT BIT_MASK(0) /* - * The touchpad reports gestures in the last byte of each packet. It can take - * any of the following values: + * BYD pad constants */ -/* One-finger scrolling in one of the edge scroll zones. */ -#define BYD_SCROLLUP 0xCA -#define BYD_SCROLLDOWN 0x36 -#define BYD_SCROLLLEFT 0xCB -#define BYD_SCROLLRIGHT 0x35 -/* Two-finger scrolling. */ -#define BYD_2DOWN 0x2B -#define BYD_2UP 0xD5 -#define BYD_2LEFT 0xD6 -#define BYD_2RIGHT 0x2A -/* Pinching in or out. */ -#define BYD_ZOOMOUT 0xD8 -#define BYD_ZOOMIN 0x28 -/* Three-finger swipe. */ -#define BYD_3UP 0xD3 -#define BYD_3DOWN 0x2D -#define BYD_3LEFT 0xD4 -#define BYD_3RIGHT 0x2C -/* Four-finger swipe. */ -#define BYD_4UP 0xCD -#define BYD_4DOWN 0x33 +/* + * True device resolution is unknown, however experiments show the + * resolution is about 111 units/mm. + * Absolute coordinate packets are in the range 0-255 for both X and Y + * we pick ABS_X/ABS_Y dimensions which are multiples of 256 and in + * the right ballpark given the touchpad's physical dimensions and estimate + * resolution per spec sheet, device active area dimensions are + * 101.6 x 60.1 mm. + */ +#define BYD_PAD_WIDTH 11264 +#define BYD_PAD_HEIGHT 6656 +#define BYD_PAD_RESOLUTION 111 -int byd_detect(struct psmouse *psmouse, bool set_properties) +/* + * Given the above dimensions, relative packets velocity is in multiples of + * 1 unit / 11 milliseconds. 
We use this dt to estimate distance traveled + */ +#define BYD_DT 11 +/* Time in jiffies used to timeout various touch events (64 ms) */ +#define BYD_TOUCH_TIMEOUT msecs_to_jiffies(64) + +/* BYD commands reverse engineered from windows driver */ + +/* + * Swipe gesture from off-pad to on-pad + * 0 : disable + * 1 : enable + */ +#define BYD_CMD_SET_OFFSCREEN_SWIPE 0x10cc +/* + * Tap and drag delay time + * 0 : disable + * 1 - 8 : least to most delay + */ +#define BYD_CMD_SET_TAP_DRAG_DELAY_TIME 0x10cf +/* + * Physical buttons function mapping + * 0 : enable + * 4 : normal + * 5 : left button custom command + * 6 : right button custom command + * 8 : disable + */ +#define BYD_CMD_SET_PHYSICAL_BUTTONS 0x10d0 +/* + * Absolute mode (1 byte X/Y resolution) + * 0 : disable + * 2 : enable + */ +#define BYD_CMD_SET_ABSOLUTE_MODE 0x10d1 +/* + * Two finger scrolling + * 1 : vertical + * 2 : horizontal + * 3 : vertical + horizontal + * 4 : disable + */ +#define BYD_CMD_SET_TWO_FINGER_SCROLL 0x10d2 +/* + * Handedness + * 1 : right handed + * 2 : left handed + */ +#define BYD_CMD_SET_HANDEDNESS 0x10d3 +/* + * Tap to click + * 1 : enable + * 2 : disable + */ +#define BYD_CMD_SET_TAP 0x10d4 +/* + * Tap and drag + * 1 : tap and hold to drag + * 2 : tap and hold to drag + lock + * 3 : disable + */ +#define BYD_CMD_SET_TAP_DRAG 0x10d5 +/* + * Touch sensitivity + * 1 - 7 : least to most sensitive + */ +#define BYD_CMD_SET_TOUCH_SENSITIVITY 0x10d6 +/* + * One finger scrolling + * 1 : vertical + * 2 : horizontal + * 3 : vertical + horizontal + * 4 : disable + */ +#define BYD_CMD_SET_ONE_FINGER_SCROLL 0x10d7 +/* + * One finger scrolling function + * 1 : free scrolling + * 2 : edge motion + * 3 : free scrolling + edge motion + * 4 : disable + */ +#define BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC 0x10d8 +/* + * Sliding speed + * 1 - 5 : slowest to fastest + */ +#define BYD_CMD_SET_SLIDING_SPEED 0x10da +/* + * Edge motion + * 1 : disable + * 2 : enable when dragging + * 3 : enable when dragging and pointing + */ +#define BYD_CMD_SET_EDGE_MOTION 0x10db +/* + * Left edge region size + * 0 - 7 : smallest to largest width + */ +#define BYD_CMD_SET_LEFT_EDGE_REGION 0x10dc +/* + * Top edge region size + * 0 - 9 : smallest to largest height + */ +#define BYD_CMD_SET_TOP_EDGE_REGION 0x10dd +/* + * Disregard palm presses as clicks + * 1 - 6 : smallest to largest + */ +#define BYD_CMD_SET_PALM_CHECK 0x10de +/* + * Right edge region size + * 0 - 7 : smallest to largest width + */ +#define BYD_CMD_SET_RIGHT_EDGE_REGION 0x10df +/* + * Bottom edge region size + * 0 - 9 : smallest to largest height + */ +#define BYD_CMD_SET_BOTTOM_EDGE_REGION 0x10e1 +/* + * Multitouch gestures + * 1 : enable + * 2 : disable + */ +#define BYD_CMD_SET_MULTITOUCH 0x10e3 +/* + * Edge motion speed + * 0 : control with finger pressure + * 1 - 9 : slowest to fastest + */ +#define BYD_CMD_SET_EDGE_MOTION_SPEED 0x10e4 +/* + * Two finger scrolling function + * 0 : free scrolling + * 1 : free scrolling (with momentum) + * 2 : edge motion + * 3 : free scrolling (with momentum) + edge motion + * 4 : disable + */ +#define BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC 0x10e5 + +/* + * The touchpad generates a mixture of absolute and relative packets, indicated + * by the last byte of each packet being set to one of the following: + */ +#define BYD_PACKET_ABSOLUTE 0xf8 +#define BYD_PACKET_RELATIVE 0x00 +/* Multitouch gesture packets */ +#define BYD_PACKET_PINCH_IN 0xd8 +#define BYD_PACKET_PINCH_OUT 0x28 +#define BYD_PACKET_ROTATE_CLOCKWISE 0x29 +#define 
BYD_PACKET_ROTATE_ANTICLOCKWISE 0xd7 +#define BYD_PACKET_TWO_FINGER_SCROLL_RIGHT 0x2a +#define BYD_PACKET_TWO_FINGER_SCROLL_DOWN 0x2b +#define BYD_PACKET_TWO_FINGER_SCROLL_UP 0xd5 +#define BYD_PACKET_TWO_FINGER_SCROLL_LEFT 0xd6 +#define BYD_PACKET_THREE_FINGER_SWIPE_RIGHT 0x2c +#define BYD_PACKET_THREE_FINGER_SWIPE_DOWN 0x2d +#define BYD_PACKET_THREE_FINGER_SWIPE_UP 0xd3 +#define BYD_PACKET_THREE_FINGER_SWIPE_LEFT 0xd4 +#define BYD_PACKET_FOUR_FINGER_DOWN 0x33 +#define BYD_PACKET_FOUR_FINGER_UP 0xcd +#define BYD_PACKET_REGION_SCROLL_RIGHT 0x35 +#define BYD_PACKET_REGION_SCROLL_DOWN 0x36 +#define BYD_PACKET_REGION_SCROLL_UP 0xca +#define BYD_PACKET_REGION_SCROLL_LEFT 0xcb +#define BYD_PACKET_RIGHT_CORNER_CLICK 0xd2 +#define BYD_PACKET_LEFT_CORNER_CLICK 0x2e +#define BYD_PACKET_LEFT_AND_RIGHT_CORNER_CLICK 0x2f +#define BYD_PACKET_ONTO_PAD_SWIPE_RIGHT 0x37 +#define BYD_PACKET_ONTO_PAD_SWIPE_DOWN 0x30 +#define BYD_PACKET_ONTO_PAD_SWIPE_UP 0xd0 +#define BYD_PACKET_ONTO_PAD_SWIPE_LEFT 0xc9 + +struct byd_data { + struct timer_list timer; + s32 abs_x; + s32 abs_y; + typeof(jiffies) last_touch_time; + bool btn_left; + bool btn_right; + bool touch; +}; + +static void byd_report_input(struct psmouse *psmouse) { - struct ps2dev *ps2dev = &psmouse->ps2dev; - unsigned char param[4]; + struct byd_data *priv = psmouse->private; + struct input_dev *dev = psmouse->dev; - param[0] = 0x03; - param[1] = 0x00; - param[2] = 0x00; - param[3] = 0x00; + input_report_key(dev, BTN_TOUCH, priv->touch); + input_report_key(dev, BTN_TOOL_FINGER, priv->touch); - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) - return -1; - if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) - return -1; + input_report_abs(dev, ABS_X, priv->abs_x); + input_report_abs(dev, ABS_Y, priv->abs_y); + input_report_key(dev, BTN_LEFT, priv->btn_left); + input_report_key(dev, BTN_RIGHT, priv->btn_right); - if (param[1] != 0x03 || param[2] != 0x64) - return -ENODEV; + input_sync(dev); +} - psmouse_dbg(psmouse, "BYD touchpad detected\n"); +static void byd_clear_touch(unsigned long data) +{ + struct psmouse *psmouse = (struct psmouse *)data; + struct byd_data *priv = psmouse->private; - if (set_properties) { - psmouse->vendor = "BYD"; - psmouse->name = "TouchPad"; - } + serio_pause_rx(psmouse->ps2dev.serio); + priv->touch = false; - return 0; + byd_report_input(psmouse); + + serio_continue_rx(psmouse->ps2dev.serio); + + /* + * Move cursor back to center of pad when we lose touch - this + * specifically improves user experience when moving cursor with one + * finger, and pressing a button with another. + */ + priv->abs_x = BYD_PAD_WIDTH / 2; + priv->abs_y = BYD_PAD_HEIGHT / 2; } static psmouse_ret_t byd_process_byte(struct psmouse *psmouse) { - struct input_dev *dev = psmouse->dev; + struct byd_data *priv = psmouse->private; u8 *pkt = psmouse->packet; if (psmouse->pktcnt > 0 && !(pkt[0] & PS2_ALWAYS_1)) { @@ -102,53 +284,34 @@ static psmouse_ret_t byd_process_byte(struct psmouse *psmouse) /* Otherwise, a full packet has been received */ switch (pkt[3]) { - case 0: { + case BYD_PACKET_ABSOLUTE: + /* Only use absolute packets for the start of movement. 
*/ + if (!priv->touch) { + /* needed to detect tap */ + typeof(jiffies) tap_time = + priv->last_touch_time + BYD_TOUCH_TIMEOUT; + priv->touch = time_after(jiffies, tap_time); + + /* init abs position */ + priv->abs_x = pkt[1] * (BYD_PAD_WIDTH / 256); + priv->abs_y = (255 - pkt[2]) * (BYD_PAD_HEIGHT / 256); + } + break; + case BYD_PACKET_RELATIVE: { /* Standard packet */ /* Sign-extend if a sign bit is set. */ - unsigned int signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0; - unsigned int signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0; - int dx = signx | (int) pkt[1]; - int dy = signy | (int) pkt[2]; + u32 signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0; + u32 signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0; + s32 dx = signx | (int) pkt[1]; + s32 dy = signy | (int) pkt[2]; - input_report_rel(psmouse->dev, REL_X, dx); - input_report_rel(psmouse->dev, REL_Y, -dy); + /* Update position based on velocity */ + priv->abs_x += dx * BYD_DT; + priv->abs_y -= dy * BYD_DT; - input_report_key(psmouse->dev, BTN_LEFT, pkt[0] & PS2_LEFT); - input_report_key(psmouse->dev, BTN_RIGHT, pkt[0] & PS2_RIGHT); - input_report_key(psmouse->dev, BTN_MIDDLE, pkt[0] & PS2_MIDDLE); + priv->touch = true; break; } - - case BYD_SCROLLDOWN: - case BYD_2DOWN: - input_report_rel(dev, REL_WHEEL, -1); - break; - - case BYD_SCROLLUP: - case BYD_2UP: - input_report_rel(dev, REL_WHEEL, 1); - break; - - case BYD_SCROLLLEFT: - case BYD_2LEFT: - input_report_rel(dev, REL_HWHEEL, -1); - break; - - case BYD_SCROLLRIGHT: - case BYD_2RIGHT: - input_report_rel(dev, REL_HWHEEL, 1); - break; - - case BYD_ZOOMOUT: - case BYD_ZOOMIN: - case BYD_3UP: - case BYD_3DOWN: - case BYD_3LEFT: - case BYD_3RIGHT: - case BYD_4UP: - case BYD_4DOWN: - break; - default: psmouse_warn(psmouse, "Unrecognized Z: pkt = %02x %02x %02x %02x\n", @@ -157,134 +320,76 @@ static psmouse_ret_t byd_process_byte(struct psmouse *psmouse) return PSMOUSE_BAD_DATA; } - input_sync(dev); + priv->btn_left = pkt[0] & PS2_LEFT; + priv->btn_right = pkt[0] & PS2_RIGHT; - return PSMOUSE_FULL_PACKET; -} + byd_report_input(psmouse); -/* Send a sequence of bytes, where each is ACKed before the next is sent. */ -static int byd_send_sequence(struct psmouse *psmouse, const u8 *seq, size_t len) -{ - unsigned int i; - - for (i = 0; i < len; ++i) { - if (ps2_command(&psmouse->ps2dev, NULL, seq[i])) - return -1; + /* Reset time since last touch. */ + if (priv->touch) { + priv->last_touch_time = jiffies; + mod_timer(&priv->timer, jiffies + BYD_TOUCH_TIMEOUT); } - return 0; -} - -/* Keep scrolling after fingers are removed. */ -#define SCROLL_INERTIAL 0x01 -#define SCROLL_NO_INERTIAL 0x02 - -/* Clicking can be done by tapping or pressing. */ -#define CLICK_BOTH 0x01 -/* Clicking can only be done by pressing. */ -#define CLICK_PRESS_ONLY 0x02 -static int byd_enable(struct psmouse *psmouse) -{ - const u8 seq1[] = { 0xE2, 0x00, 0xE0, 0x02, 0xE0 }; - const u8 seq2[] = { - 0xD3, 0x01, - 0xD0, 0x00, - 0xD0, 0x04, - /* Whether clicking is done by tapping or pressing. */ - 0xD4, CLICK_PRESS_ONLY, - 0xD5, 0x01, - 0xD7, 0x03, - /* Vertical and horizontal one-finger scroll zone inertia. */ - 0xD8, SCROLL_INERTIAL, - 0xDA, 0x05, - 0xDB, 0x02, - 0xE4, 0x05, - 0xD6, 0x01, - 0xDE, 0x04, - 0xE3, 0x01, - 0xCF, 0x00, - 0xD2, 0x03, - /* Vertical and horizontal two-finger scrolling inertia. 
*/ - 0xE5, SCROLL_INERTIAL, - 0xD9, 0x02, - 0xD9, 0x07, - 0xDC, 0x03, - 0xDD, 0x03, - 0xDF, 0x03, - 0xE1, 0x03, - 0xD1, 0x00, - 0xCE, 0x00, - 0xCC, 0x00, - 0xE0, 0x00, - 0xE2, 0x01 - }; - u8 param[4]; - - if (byd_send_sequence(psmouse, seq1, ARRAY_SIZE(seq1))) - return -1; - - /* Send a 0x01 command, which should return 4 bytes. */ - if (ps2_command(&psmouse->ps2dev, param, 0x0401)) - return -1; - - if (byd_send_sequence(psmouse, seq2, ARRAY_SIZE(seq2))) - return -1; - - return 0; + return PSMOUSE_FULL_PACKET; } -/* - * Send the set of PS/2 commands required to make it identify as an - * intellimouse with 4-byte instead of 3-byte packets. - */ -static int byd_send_intellimouse_sequence(struct psmouse *psmouse) +static int byd_reset_touchpad(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; u8 param[4]; - int i; + size_t i; + const struct { u16 command; u8 arg; } seq[] = { - { PSMOUSE_CMD_RESET_BAT, 0 }, - { PSMOUSE_CMD_RESET_BAT, 0 }, - { PSMOUSE_CMD_GETID, 0 }, - { PSMOUSE_CMD_SETSCALE11, 0 }, - { PSMOUSE_CMD_SETSCALE11, 0 }, - { PSMOUSE_CMD_SETSCALE11, 0 }, - { PSMOUSE_CMD_GETINFO, 0 }, - { PSMOUSE_CMD_SETRES, 0x03 }, + /* + * Intellimouse initialization sequence, to get 4-byte instead + * of 3-byte packets. + */ { PSMOUSE_CMD_SETRATE, 0xC8 }, { PSMOUSE_CMD_SETRATE, 0x64 }, { PSMOUSE_CMD_SETRATE, 0x50 }, { PSMOUSE_CMD_GETID, 0 }, - { PSMOUSE_CMD_SETRATE, 0xC8 }, - { PSMOUSE_CMD_SETRATE, 0xC8 }, - { PSMOUSE_CMD_SETRATE, 0x50 }, - { PSMOUSE_CMD_GETID, 0 }, - { PSMOUSE_CMD_SETRATE, 0x64 }, - { PSMOUSE_CMD_SETRES, 0x03 }, - { PSMOUSE_CMD_ENABLE, 0 } + { PSMOUSE_CMD_ENABLE, 0 }, + /* + * BYD-specific initialization, which enables absolute mode and + * (if desired), the touchpad's built-in gesture detection. + */ + { 0x10E2, 0x00 }, + { 0x10E0, 0x02 }, + /* The touchpad should reply with 4 seemingly-random bytes */ + { 0x14E0, 0x01 }, + /* Pairs of parameters and values. */ + { BYD_CMD_SET_HANDEDNESS, 0x01 }, + { BYD_CMD_SET_PHYSICAL_BUTTONS, 0x04 }, + { BYD_CMD_SET_TAP, 0x02 }, + { BYD_CMD_SET_ONE_FINGER_SCROLL, 0x04 }, + { BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC, 0x04 }, + { BYD_CMD_SET_EDGE_MOTION, 0x01 }, + { BYD_CMD_SET_PALM_CHECK, 0x00 }, + { BYD_CMD_SET_MULTITOUCH, 0x02 }, + { BYD_CMD_SET_TWO_FINGER_SCROLL, 0x04 }, + { BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC, 0x04 }, + { BYD_CMD_SET_LEFT_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_TOP_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_RIGHT_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_BOTTOM_EDGE_REGION, 0x00 }, + { BYD_CMD_SET_ABSOLUTE_MODE, 0x02 }, + /* Finalize initialization. 
*/ + { 0x10E0, 0x00 }, + { 0x10E2, 0x01 }, }; - memset(param, 0, sizeof(param)); for (i = 0; i < ARRAY_SIZE(seq); ++i) { + memset(param, 0, sizeof(param)); param[0] = seq[i].arg; if (ps2_command(ps2dev, param, seq[i].command)) - return -1; + return -EIO; } - return 0; -} - -static int byd_reset_touchpad(struct psmouse *psmouse) -{ - if (byd_send_intellimouse_sequence(psmouse)) - return -EIO; - - if (byd_enable(psmouse)) - return -EIO; - + psmouse_set_state(psmouse, PSMOUSE_ACTIVATED); return 0; } @@ -314,9 +419,50 @@ static int byd_reconnect(struct psmouse *psmouse) return 0; } +static void byd_disconnect(struct psmouse *psmouse) +{ + struct byd_data *priv = psmouse->private; + + if (priv) { + del_timer(&priv->timer); + kfree(psmouse->private); + psmouse->private = NULL; + } +} + +int byd_detect(struct psmouse *psmouse, bool set_properties) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + u8 param[4] = {0x03, 0x00, 0x00, 0x00}; + + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES)) + return -1; + if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) + return -1; + + if (param[1] != 0x03 || param[2] != 0x64) + return -ENODEV; + + psmouse_dbg(psmouse, "BYD touchpad detected\n"); + + if (set_properties) { + psmouse->vendor = "BYD"; + psmouse->name = "TouchPad"; + } + + return 0; +} + int byd_init(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; + struct byd_data *priv; if (psmouse_reset(psmouse)) return -EIO; @@ -324,14 +470,39 @@ int byd_init(struct psmouse *psmouse) if (byd_reset_touchpad(psmouse)) return -EIO; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + memset(priv, 0, sizeof(*priv)); + setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse); + + psmouse->private = priv; + psmouse->disconnect = byd_disconnect; psmouse->reconnect = byd_reconnect; psmouse->protocol_handler = byd_process_byte; psmouse->pktsize = 4; psmouse->resync_time = 0; - __set_bit(BTN_MIDDLE, dev->keybit); - __set_bit(REL_WHEEL, dev->relbit); - __set_bit(REL_HWHEEL, dev->relbit); + __set_bit(INPUT_PROP_POINTER, dev->propbit); + /* Touchpad */ + __set_bit(BTN_TOUCH, dev->keybit); + __set_bit(BTN_TOOL_FINGER, dev->keybit); + /* Buttons */ + __set_bit(BTN_LEFT, dev->keybit); + __set_bit(BTN_RIGHT, dev->keybit); + __clear_bit(BTN_MIDDLE, dev->keybit); + + /* Absolute position */ + __set_bit(EV_ABS, dev->evbit); + input_set_abs_params(dev, ABS_X, 0, BYD_PAD_WIDTH, 0, 0); + input_set_abs_params(dev, ABS_Y, 0, BYD_PAD_HEIGHT, 0, 0); + input_abs_set_res(dev, ABS_X, BYD_PAD_RESOLUTION); + input_abs_set_res(dev, ABS_Y, BYD_PAD_RESOLUTION); + /* No relative support */ + __clear_bit(EV_REL, dev->evbit); + __clear_bit(REL_X, dev->relbit); + __clear_bit(REL_Y, dev->relbit); return 0; } diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 39d1becd35c9..5784e20542a4 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -846,7 +846,7 @@ static const struct psmouse_protocol psmouse_protocols[] = { #ifdef CONFIG_MOUSE_PS2_BYD { .type = PSMOUSE_BYD, - .name = "BydPS/2", + .name = "BYDPS/2", .alias = "byd", .detect = byd_detect, .init = byd_init, diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 6025eb430c0a..a41d8328c064 100644 --- a/drivers/input/mouse/synaptics.c +++ 
b/drivers/input/mouse/synaptics.c @@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse, if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) return; - /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ - if (SYN_ID_FULL(priv->identity) == 0x801 && + /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */ + if ((SYN_ID_FULL(priv->identity) == 0x801 || + SYN_ID_FULL(priv->identity) == 0x802) && !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) return; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index da38f0ad80ed..faa295ec4f31 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -126,7 +126,7 @@ static void process_one_interrupt(struct rmi_driver_data *data, return; fh = to_rmi_function_handler(fn->dev.driver); - if (fn->irq_mask && fh->attention) { + if (fh->attention) { bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask, data->irq_count); if (!bitmap_empty(data->fn_irq_bits, data->irq_count)) @@ -172,8 +172,7 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) * use irq_chip. */ list_for_each_entry(entry, &data->function_list, node) - if (entry->irq_mask) - process_one_interrupt(data, entry); + process_one_interrupt(data, entry); if (data->input) input_sync(data->input); diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index 892729734c51..fb5fb9140ca9 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -1310,8 +1310,34 @@ static ssize_t mip4_sysfs_read_fw_version(struct device *dev, static DEVICE_ATTR(fw_version, S_IRUGO, mip4_sysfs_read_fw_version, NULL); +static ssize_t mip4_sysfs_read_hw_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mip4_ts *ts = i2c_get_clientdata(client); + size_t count; + + /* Take lock to prevent racing with firmware update */ + mutex_lock(&ts->input->mutex); + + /* + * product_name shows the name or version of the hardware + * paired with current firmware in the chip. 
+ */ + count = snprintf(buf, PAGE_SIZE, "%.*s\n", + (int)sizeof(ts->product_name), ts->product_name); + + mutex_unlock(&ts->input->mutex); + + return count; +} + +static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL); + static struct attribute *mip4_attrs[] = { &dev_attr_fw_version.attr, + &dev_attr_hw_version.attr, &dev_attr_update_fw.attr, NULL, }; @@ -1512,6 +1538,6 @@ static struct i2c_driver mip4_driver = { module_i2c_driver(mip4_driver); MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen"); -MODULE_VERSION("2016.03.03"); +MODULE_VERSION("2016.03.12"); MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index b6c4d03de340..880c40b23f66 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -197,28 +197,34 @@ static int sur40_command(struct sur40_state *dev, static int sur40_init(struct sur40_state *dev) { int result; - u8 buffer[24]; + u8 *buffer; + + buffer = kmalloc(24, GFP_KERNEL); + if (!buffer) { + result = -ENOMEM; + goto error; + } /* stupidly replay the original MS driver init sequence */ result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5); if (result < 0) - return result; + goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12); @@ -226,7 +232,8 @@ static int sur40_init(struct sur40_state *dev) * Discard the result buffer - no known data inside except * some version strings, maybe extract these sometime... 
*/ - +error: + kfree(buffer); return result; } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 0d1fb6b40c46..0dc9a80adb94 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -464,8 +464,13 @@ static int nvm_core_init(struct nvm_dev *dev) dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls; dev->total_secs = dev->nr_luns * dev->sec_per_lun; + dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns), + sizeof(unsigned long), GFP_KERNEL); + if (!dev->lun_map) + return -ENOMEM; INIT_LIST_HEAD(&dev->online_targets); mutex_init(&dev->mlock); + spin_lock_init(&dev->lock); return 0; } @@ -585,6 +590,7 @@ int nvm_register(struct request_queue *q, char *disk_name, return 0; err_init: + kfree(dev->lun_map); kfree(dev); return ret; } @@ -607,6 +613,7 @@ void nvm_unregister(char *disk_name) up_write(&nvm_lock); nvm_exit(dev); + kfree(dev->lun_map); kfree(dev); } EXPORT_SYMBOL(nvm_unregister); diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index d65ec36a2231..72e124a3927d 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -20,6 +20,68 @@ #include "gennvm.h" +static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) +{ + struct gen_nvm *gn = dev->mp; + struct gennvm_area *area, *prev, *next; + sector_t begin = 0; + sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9; + + if (len > max_sectors) + return -EINVAL; + + area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL); + if (!area) + return -ENOMEM; + + prev = NULL; + + spin_lock(&dev->lock); + list_for_each_entry(next, &gn->area_list, list) { + if (begin + len > next->begin) { + begin = next->end; + prev = next; + continue; + } + break; + } + + if ((begin + len) > max_sectors) { + spin_unlock(&dev->lock); + kfree(area); + return -EINVAL; + } + + area->begin = *lba = begin; + area->end = begin + len; + + if (prev) /* insert into sorted order */ + list_add(&area->list, &prev->list); + else + list_add(&area->list, &gn->area_list); + spin_unlock(&dev->lock); + + return 0; +} + +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin) +{ + struct gen_nvm *gn = dev->mp; + struct gennvm_area *area; + + spin_lock(&dev->lock); + list_for_each_entry(area, &gn->area_list, list) { + if (area->begin != begin) + continue; + + list_del(&area->list); + spin_unlock(&dev->lock); + kfree(area); + return; + } + spin_unlock(&dev->lock); +} + static void gennvm_blocks_free(struct nvm_dev *dev) { struct gen_nvm *gn = dev->mp; @@ -195,7 +257,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) } } - if (dev->ops->get_l2p_tbl) { + if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) { ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, gennvm_block_map, dev); if (ret) { @@ -229,6 +291,7 @@ static int gennvm_register(struct nvm_dev *dev) gn->dev = dev; gn->nr_luns = dev->nr_luns; + INIT_LIST_HEAD(&gn->area_list); dev->mp = gn; ret = gennvm_luns_init(dev, gn); @@ -419,10 +482,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, return nvm_erase_ppa(dev, &addr, 1); } +static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid) +{ + return test_and_set_bit(lunid, dev->lun_map); +} + +static void gennvm_release_lun(struct nvm_dev *dev, int lunid) +{ + WARN_ON(!test_and_clear_bit(lunid, dev->lun_map)); +} + static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) { struct gen_nvm *gn = dev->mp; + if (unlikely(lunid >= dev->nr_luns)) + return NULL; + return &gn->luns[lunid].vlun; } @@ -464,7 +540,13 
@@ static struct nvmm_type gennvm = { .erase_blk = gennvm_erase_blk, .get_lun = gennvm_get_lun, + .reserve_lun = gennvm_reserve_lun, + .release_lun = gennvm_release_lun, .lun_info_print = gennvm_lun_info_print, + + .get_area = gennvm_get_area, + .put_area = gennvm_put_area, + }; static int __init gennvm_module_init(void) diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index 9c24b5b32dac..04d7c23cfc61 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h @@ -39,8 +39,14 @@ struct gen_nvm { int nr_luns; struct gen_lun *luns; + struct list_head area_list; }; +struct gennvm_area { + struct list_head list; + sector_t begin; + sector_t end; /* end is excluded */ +}; #define gennvm_for_each_lun(bm, lun, i) \ for ((i) = 0, lun = &(bm)->luns[0]; \ (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)]) diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 82343783aa47..3ab6495c3fd8 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -965,25 +965,11 @@ static void rrpc_requeue(struct work_struct *work) static void rrpc_gc_free(struct rrpc *rrpc) { - struct rrpc_lun *rlun; - int i; - if (rrpc->krqd_wq) destroy_workqueue(rrpc->krqd_wq); if (rrpc->kgc_wq) destroy_workqueue(rrpc->kgc_wq); - - if (!rrpc->luns) - return; - - for (i = 0; i < rrpc->nr_luns; i++) { - rlun = &rrpc->luns[i]; - - if (!rlun->blocks) - break; - vfree(rlun->blocks); - } } static int rrpc_gc_init(struct rrpc *rrpc) @@ -1053,8 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc) { struct nvm_dev *dev = rrpc->dev; sector_t i; + u64 slba; int ret; + slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9); + rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects); if (!rrpc->trans_map) return -ENOMEM; @@ -1076,7 +1065,7 @@ static int rrpc_map_init(struct rrpc *rrpc) return 0; /* Bring up the mapping table from device */ - ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update, + ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update, rrpc); if (ret) { pr_err("nvm: rrpc: could not read L2P table.\n"); @@ -1086,7 +1075,6 @@ static int rrpc_map_init(struct rrpc *rrpc) return 0; } - /* Minimum pages needed within a lun */ #define PAGE_POOL_SIZE 16 #define ADDR_POOL_SIZE 64 @@ -1141,6 +1129,23 @@ static void rrpc_core_free(struct rrpc *rrpc) static void rrpc_luns_free(struct rrpc *rrpc) { + struct nvm_dev *dev = rrpc->dev; + struct nvm_lun *lun; + struct rrpc_lun *rlun; + int i; + + if (!rrpc->luns) + return; + + for (i = 0; i < rrpc->nr_luns; i++) { + rlun = &rrpc->luns[i]; + lun = rlun->parent; + if (!lun) + break; + dev->mt->release_lun(dev, lun->id); + vfree(rlun->blocks); + } + kfree(rrpc->luns); } @@ -1148,7 +1153,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) { struct nvm_dev *dev = rrpc->dev; struct rrpc_lun *rlun; - int i, j; + int i, j, ret = -EINVAL; if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) { pr_err("rrpc: number of pages per block too high."); @@ -1164,25 +1169,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) /* 1:1 mapping */ for (i = 0; i < rrpc->nr_luns; i++) { - struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i); - - rlun = &rrpc->luns[i]; - rlun->rrpc = rrpc; - rlun->parent = lun; - INIT_LIST_HEAD(&rlun->prio_list); - INIT_LIST_HEAD(&rlun->open_list); - INIT_LIST_HEAD(&rlun->closed_list); + int lunid = lun_begin + i; + struct nvm_lun *lun; - INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); - spin_lock_init(&rlun->lock); + if 
(dev->mt->reserve_lun(dev, lunid)) { + pr_err("rrpc: lun %u is already allocated\n", lunid); + goto err; + } - rrpc->total_blocks += dev->blks_per_lun; - rrpc->nr_sects += dev->sec_per_lun; + lun = dev->mt->get_lun(dev, lunid); + if (!lun) + goto err; + rlun = &rrpc->luns[i]; + rlun->parent = lun; rlun->blocks = vzalloc(sizeof(struct rrpc_block) * rrpc->dev->blks_per_lun); - if (!rlun->blocks) + if (!rlun->blocks) { + ret = -ENOMEM; goto err; + } for (j = 0; j < rrpc->dev->blks_per_lun; j++) { struct rrpc_block *rblk = &rlun->blocks[j]; @@ -1193,11 +1199,43 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) INIT_LIST_HEAD(&rblk->prio); spin_lock_init(&rblk->lock); } + + rlun->rrpc = rrpc; + INIT_LIST_HEAD(&rlun->prio_list); + INIT_LIST_HEAD(&rlun->open_list); + INIT_LIST_HEAD(&rlun->closed_list); + + INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); + spin_lock_init(&rlun->lock); + + rrpc->total_blocks += dev->blks_per_lun; + rrpc->nr_sects += dev->sec_per_lun; + } return 0; err: - return -ENOMEM; + return ret; +} + +/* returns 0 on success and stores the beginning address in *begin */ +static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin) +{ + struct nvm_dev *dev = rrpc->dev; + struct nvmm_type *mt = dev->mt; + sector_t size = rrpc->nr_sects * dev->sec_size; + + size >>= 9; + + return mt->get_area(dev, begin, size); +} + +static void rrpc_area_free(struct rrpc *rrpc) +{ + struct nvm_dev *dev = rrpc->dev; + struct nvmm_type *mt = dev->mt; + + mt->put_area(dev, rrpc->soffset); } static void rrpc_free(struct rrpc *rrpc) @@ -1206,6 +1244,7 @@ static void rrpc_free(struct rrpc *rrpc) rrpc_map_free(rrpc); rrpc_core_free(rrpc); rrpc_luns_free(rrpc); + rrpc_area_free(rrpc); kfree(rrpc); } @@ -1327,6 +1366,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk, struct request_queue *bqueue = dev->q; struct request_queue *tqueue = tdisk->queue; struct rrpc *rrpc; + sector_t soffset; int ret; if (!(dev->identity.dom & NVM_RSP_L2P)) { @@ -1352,6 +1392,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk, /* simple round-robin strategy */ atomic_set(&rrpc->next_lun, -1); + ret = rrpc_area_init(rrpc, &soffset); + if (ret < 0) { + pr_err("nvm: rrpc: could not initialize area\n"); + return ERR_PTR(ret); + } + rrpc->soffset = soffset; + ret = rrpc_luns_init(rrpc, lun_begin, lun_end); if (ret) { pr_err("nvm: rrpc: could not initialize luns\n"); diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index 855f4a5ca7dd..2653484a3b40 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -97,6 +97,7 @@ struct rrpc { struct nvm_dev *dev; struct gendisk *disk; + sector_t soffset; /* logical sector offset */ u64 poffset; /* physical page offset */ int lun_offset; diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index acd1460cf787..2a691da8c1c7 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) /* get the Controller level irq */ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); - if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { + if (fsl_ifc_ctrl_dev->irq == 0) { dev_err(&dev->dev, "failed to get irq resource " "for IFC\n"); ret = -ENODEV; diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index ef09ba0289d7..d5cfb503b9d6 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -298,8 +298,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev) sg_count = 
dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); - if (sg_count != 1 || - (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) { + if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) { message("problem in dma_map_sg"); return -EIO; } diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 42cc953309f1..e83a279f1217 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -142,7 +142,7 @@ config MTD_AR7_PARTS config MTD_BCM63XX_PARTS tristate "BCM63XX CFE partitioning support" - depends on BCM63XX + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST select CRC32 help This provides partions parsing for BCM63xx devices with CFE diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 8282f47bcf5d..845dd27d9f41 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master, { uint32_t buf; size_t bytes_read; + int err; - if (mtd_read(master, offset, sizeof(buf), &bytes_read, - (uint8_t *)&buf) < 0) { - pr_err("mtd_read error while parsing (offset: 0x%X)!\n", - offset); + err = mtd_read(master, offset, sizeof(buf), &bytes_read, + (uint8_t *)&buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", + offset, err); goto out_default; } @@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, int trx_part = -1; int last_trx_part = -1; int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; + int err; /* * Some really old flashes (like AT45DB*) had smaller erasesize-s, but @@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master, /* Parse block by block looking for magics */ for (offset = 0; offset <= master->size - blocksize; offset += blocksize) { - /* Nothing more in higher memory */ - if (offset >= 0x2000000) + /* Nothing more in higher memory on BCM47XX (MIPS) */ + if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000) break; if (curr_part >= BCM47XXPART_MAX_PARTS) { @@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read beginning of the block */ - if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, - &bytes_read, (uint8_t *)buf) < 0) { - pr_err("mtd_read error while parsing (offset: 0x%X)!\n", - offset); + err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, + &bytes_read, (uint8_t *)buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", + offset, err); continue; } @@ -254,10 +258,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read middle of the block */ - if (mtd_read(master, offset + 0x8000, 0x4, - &bytes_read, (uint8_t *)buf) < 0) { - pr_err("mtd_read error while parsing (offset: 0x%X)!\n", - offset); + err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read, + (uint8_t *)buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", + offset, err); continue; } @@ -277,10 +282,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } offset = master->size - possible_nvram_sizes[i]; - if (mtd_read(master, offset, 0x4, &bytes_read, - (uint8_t *)buf) < 0) { - pr_err("mtd_read error while reading at offset 0x%X!\n", - offset); + err = mtd_read(master, offset, 0x4, &bytes_read, + (uint8_t *)buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while reading (offset 0x%X): %d\n", + offset, err); continue; } diff --git a/drivers/mtd/bcm63xxpart.c 
b/drivers/mtd/bcm63xxpart.c index cec3188a170d..41d1d3149c61 100644 --- a/drivers/mtd/bcm63xxpart.c +++ b/drivers/mtd/bcm63xxpart.c @@ -24,6 +24,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/bcm963xx_nvram.h> #include <linux/bcm963xx_tag.h> #include <linux/crc32.h> #include <linux/module.h> @@ -34,12 +35,15 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> -#include <asm/mach-bcm63xx/bcm63xx_nvram.h> -#include <asm/mach-bcm63xx/board_bcm963xx.h> +#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */ -#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */ +#define BCM963XX_CFE_MAGIC_OFFSET 0x4e0 +#define BCM963XX_CFE_VERSION_OFFSET 0x570 +#define BCM963XX_NVRAM_OFFSET 0x580 -#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0 +/* Ensure strings read from flash structs are null terminated */ +#define STR_NULL_TERMINATE(x) \ + do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0) static int bcm63xx_detect_cfe(struct mtd_info *master) { @@ -58,68 +62,130 @@ static int bcm63xx_detect_cfe(struct mtd_info *master) return 0; /* very old CFE's do not have the cfe-v string, so check for magic */ - ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen, + ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen, (void *)buf); buf[retlen] = 0; return strncmp("CFE1CFE1", buf, 8); } -static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, - const struct mtd_partition **pparts, - struct mtd_part_parser_data *data) +static int bcm63xx_read_nvram(struct mtd_info *master, + struct bcm963xx_nvram *nvram) +{ + u32 actual_crc, expected_crc; + size_t retlen; + int ret; + + /* extract nvram data */ + ret = mtd_read(master, BCM963XX_NVRAM_OFFSET, BCM963XX_NVRAM_V5_SIZE, + &retlen, (void *)nvram); + if (ret) + return ret; + + ret = bcm963xx_nvram_checksum(nvram, &expected_crc, &actual_crc); + if (ret) + pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", + expected_crc, actual_crc); + + if (!nvram->psi_size) + nvram->psi_size = BCM963XX_DEFAULT_PSI_SIZE; + + return 0; +} + +static int bcm63xx_read_image_tag(struct mtd_info *master, const char *name, + loff_t tag_offset, struct bcm_tag *buf) +{ + int ret; + size_t retlen; + u32 computed_crc; + + ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf); + if (ret) + return ret; + + if (retlen != sizeof(*buf)) + return -EIO; + + computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf, + offsetof(struct bcm_tag, header_crc)); + if (computed_crc == buf->header_crc) { + STR_NULL_TERMINATE(buf->board_id); + STR_NULL_TERMINATE(buf->tag_version); + + pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n", + name, tag_offset, buf->tag_version, buf->board_id); + + return 0; + } + + pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n", + name, tag_offset, buf->header_crc, computed_crc); + return 1; +} + +static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master, + const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram) { /* CFE, NVRAM and global Linux are always present */ int nrparts = 3, curpart = 0; - struct bcm_tag *buf; + struct bcm_tag *buf = NULL; struct mtd_partition *parts; int ret; - size_t retlen; unsigned int rootfsaddr, kerneladdr, spareaddr; unsigned int rootfslen, kernellen, sparelen, totallen; unsigned int cfelen, nvramlen; unsigned int cfe_erasesize; int i; - u32 computed_crc; bool rootfs_first = false; - if (bcm63xx_detect_cfe(master)) - return -EINVAL; - cfe_erasesize = 
max_t(uint32_t, master->erasesize, - BCM63XX_CFE_BLOCK_SIZE); + BCM963XX_CFE_BLOCK_SIZE); cfelen = cfe_erasesize; - nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K; + nvramlen = nvram->psi_size * SZ_1K; nvramlen = roundup(nvramlen, cfe_erasesize); - /* Allocate memory for buffer */ buf = vmalloc(sizeof(struct bcm_tag)); if (!buf) return -ENOMEM; /* Get the tag */ - ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen, - (void *)buf); - - if (retlen != sizeof(struct bcm_tag)) { - vfree(buf); - return -EIO; - } + ret = bcm63xx_read_image_tag(master, "rootfs", cfelen, buf); + if (!ret) { + STR_NULL_TERMINATE(buf->flash_image_start); + if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) || + rootfsaddr < BCM963XX_EXTENDED_SIZE) { + pr_err("invalid rootfs address: %*ph\n", + (int)sizeof(buf->flash_image_start), + buf->flash_image_start); + goto invalid_tag; + } - computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf, - offsetof(struct bcm_tag, header_crc)); - if (computed_crc == buf->header_crc) { - char *boardid = &(buf->board_id[0]); - char *tagversion = &(buf->tag_version[0]); + STR_NULL_TERMINATE(buf->kernel_address); + if (kstrtouint(buf->kernel_address, 10, &kerneladdr) || + kerneladdr < BCM963XX_EXTENDED_SIZE) { + pr_err("invalid kernel address: %*ph\n", + (int)sizeof(buf->kernel_address), + buf->kernel_address); + goto invalid_tag; + } - sscanf(buf->flash_image_start, "%u", &rootfsaddr); - sscanf(buf->kernel_address, "%u", &kerneladdr); - sscanf(buf->kernel_length, "%u", &kernellen); - sscanf(buf->total_length, "%u", &totallen); + STR_NULL_TERMINATE(buf->kernel_length); + if (kstrtouint(buf->kernel_length, 10, &kernellen)) { + pr_err("invalid kernel length: %*ph\n", + (int)sizeof(buf->kernel_length), + buf->kernel_length); + goto invalid_tag; + } - pr_info("CFE boot tag found with version %s and board type %s\n", - tagversion, boardid); + STR_NULL_TERMINATE(buf->total_length); + if (kstrtouint(buf->total_length, 10, &totallen)) { + pr_err("invalid total length: %*ph\n", + (int)sizeof(buf->total_length), + buf->total_length); + goto invalid_tag; + } kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE; rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE; @@ -134,13 +200,14 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, rootfsaddr = kerneladdr + kernellen; rootfslen = spareaddr - rootfsaddr; } - } else { - pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n", - buf->header_crc, computed_crc); + } else if (ret > 0) { +invalid_tag: kernellen = 0; rootfslen = 0; rootfsaddr = 0; spareaddr = cfelen; + } else { + goto out; } sparelen = master->size - spareaddr - nvramlen; @@ -151,11 +218,10 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, if (kernellen > 0) nrparts++; - /* Ask kernel for more memory */ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); if (!parts) { - vfree(buf); - return -ENOMEM; + ret = -ENOMEM; + goto out; } /* Start building partition list */ @@ -206,9 +272,43 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, sparelen); *pparts = parts; + ret = 0; + +out: vfree(buf); + if (ret) + return ret; + return nrparts; +} + +static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, + const struct mtd_partition **pparts, + struct mtd_part_parser_data *data) +{ + struct bcm963xx_nvram *nvram = NULL; + int ret; + + if (bcm63xx_detect_cfe(master)) + return -EINVAL; + + nvram = vzalloc(sizeof(*nvram)); + if (!nvram) + return -ENOMEM; + + ret = bcm63xx_read_nvram(master, nvram); + if 
(ret) + goto out; + + if (!mtd_type_is_nand(master)) + ret = bcm63xx_parse_cfe_nor_partitions(master, pparts, nvram); + else + ret = -EINVAL; + +out: + vfree(nvram); + return ret; }; static struct mtd_part_parser bcm63xx_cfe_parser = { diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index c3a2695a4420..e7b2e439696c 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -72,13 +72,11 @@ MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, " * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC) * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC) * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15 - * @oobavail: 8 available bytes remaining after ECC toll */ static struct nand_ecclayout docg3_oobinfo = { .eccbytes = 8, .eccpos = {7, 8, 9, 10, 11, 12, 13, 14}, .oobfree = {{0, 7}, {15, 1} }, - .oobavail = 8, }; static inline u8 doc_readb(struct docg3 *docg3, u16 reg) @@ -1438,7 +1436,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, oobdelta = mtd->oobsize; break; case MTD_OPS_AUTO_OOB: - oobdelta = mtd->ecclayout->oobavail; + oobdelta = mtd->oobavail; break; default: return -EINVAL; @@ -1860,6 +1858,7 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) mtd->_write_oob = doc_write_oob; mtd->_block_isbad = doc_block_isbad; mtd->ecclayout = &docg3_oobinfo; + mtd->oobavail = 8; mtd->ecc_strength = DOC_ECC_BCH_T; return 0; diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 627a9bc37679..cbd8547d7aad 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c @@ -19,6 +19,7 @@ static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE; static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE; +static unsigned long writebuf_size = 64; #define MTDRAM_TOTAL_SIZE (total_size * 1024) #define MTDRAM_ERASE_SIZE (erase_size * 1024) @@ -27,6 +28,8 @@ module_param(total_size, ulong, 0); MODULE_PARM_DESC(total_size, "Total device size in KiB"); module_param(erase_size, ulong, 0); MODULE_PARM_DESC(erase_size, "Device erase block size in KiB"); +module_param(writebuf_size, ulong, 0); +MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)"); #endif // We could store these in the mtd structure, but we only support 1 device.. 
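The mtdram hunk above turns the write buffer size into a load-time parameter instead of the hard-coded, CFI-NOR-like 64 bytes, so a test rig can mimic other flash geometries, e.g. "modprobe mtdram total_size=4096 erase_size=128 writebuf_size=256".

Several hunks that follow (mtdpart, mtdswap, and the nand_base read/write paths) collapse the open-coded "ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize" test into a single mtd_oobavail() call, and the docg4/hisi504/brcmnand changes drop the per-ecclayout oobavail field in favour of mtd->oobavail. The helper itself is presumably added on the include/ side (outside this drivers/-only diffstat) and is not shown here; a minimal sketch consistent with the call sites in this diff:

/*
 * Sketch only -- the real helper is not part of this drivers/ diff.
 * MTD_OPS_AUTO_OOB callers see the number of client-usable OOB bytes;
 * all other modes see the raw OOB size.
 */
static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

Centralising the value on struct mtd_info is what lets the nand_scan_tail() hunk further down compute mtd->oobavail once from ecc->layout->oobfree[], and lets layout-less setups still report a sane value.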
@@ -123,7 +126,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, mtd->flags = MTD_CAP_RAM; mtd->size = size; mtd->writesize = 1; - mtd->writebufsize = 64; /* Mimic CFI NOR flashes */ + mtd->writebufsize = writebuf_size; mtd->erasesize = MTDRAM_ERASE_SIZE; mtd->priv = mapped_address; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 10bf304027dd..08de4b2cf0f5 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -126,10 +126,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from, if (ops->oobbuf) { size_t len, pages; - if (ops->mode == MTD_OPS_AUTO_OOB) - len = mtd->oobavail; - else - len = mtd->oobsize; + len = mtd_oobavail(mtd, ops); pages = mtd_div_by_ws(mtd->size, mtd); pages -= mtd_div_by_ws(from, mtd); if (ops->ooboffs + ops->ooblen > pages * len) diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index fc8b3d16cce7..cb06bdd21a1b 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset)) return MTDSWAP_SCANNED_BAD; - ops.ooblen = 2 * d->mtd->ecclayout->oobavail; + ops.ooblen = 2 * d->mtd->oobavail; ops.oobbuf = d->oob_buf; ops.ooboffs = 0; ops.datbuf = NULL; @@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) data = (struct mtdswap_oobdata *)d->oob_buf; data2 = (struct mtdswap_oobdata *) - (d->oob_buf + d->mtd->ecclayout->oobavail); + (d->oob_buf + d->mtd->oobavail); if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { eb->erase_count = le32_to_cpu(data->count); @@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d, ops.mode = MTD_OPS_AUTO_OOB; ops.len = mtd->writesize; - ops.ooblen = mtd->ecclayout->oobavail; + ops.ooblen = mtd->oobavail; ops.ooboffs = 0; ops.datbuf = d->page_buf; ops.oobbuf = d->oob_buf; @@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d, for (i = 0; i < mtd_pages; i++) { patt = mtdswap_test_patt(test + i); memset(d->page_buf, patt, mtd->writesize); - memset(d->oob_buf, patt, mtd->ecclayout->oobavail); + memset(d->oob_buf, patt, mtd->oobavail); ret = mtd_write_oob(mtd, pos, &ops); if (ret) goto error; @@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d, if (p1[j] != patt) goto error; - for (j = 0; j < mtd->ecclayout->oobavail; j++) + for (j = 0; j < mtd->oobavail; j++) if (p2[j] != (unsigned char)patt) goto error; @@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks, if (!d->page_buf) goto page_buf_fail; - d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL); + d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL); if (!d->oob_buf) goto oob_buf_fail; @@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) unsigned long part; unsigned int eblocks, eavailable, bad_blocks, spare_cnt; uint64_t swap_size, use_size, size_limit; - struct nand_ecclayout *oinfo; int ret; parts = &partitions[0]; @@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) return; } - oinfo = mtd->ecclayout; - if (!oinfo) { - printk(KERN_ERR "%s: mtd%d does not have OOB\n", - MTDSWAP_PREFIX, mtd->index); - return; - } - - if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) { + if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) { printk(KERN_ERR "%s: Not enough free bytes in OOB, " "%d available, %zu 
needed.\n", - MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); + MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE); return; } diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 20f01b3ec23d..f05e0e9eb2f7 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR config MTD_NAND_GPIO tristate "GPIO assisted NAND Flash driver" depends on GPIOLIB || COMPILE_TEST + depends on HAS_IOMEM help This enables a NAND flash driver where control signals are connected to GPIO pins, and commands and data are communicated @@ -310,6 +311,7 @@ config MTD_NAND_CAFE config MTD_NAND_CS553X tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)" depends on X86_32 + depends on !UML && HAS_IOMEM help The CS553x companion chips for the AMD Geode processor include NAND flash controllers with built-in hardware ECC @@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC config MTD_NAND_VF610_NFC tristate "Support for Freescale NFC for VF610/MPC5125" depends on (SOC_VF610 || COMPILE_TEST) + depends on HAS_IOMEM help Enables support for NAND Flash Controller on some Freescale processors like the VF610, MPC5125, MCF54418 or Kinetis K70. @@ -553,4 +556,11 @@ config MTD_NAND_HISI504 help Enables support for NAND controller on Hisilicon SoC Hip04. +config MTD_NAND_QCOM + tristate "Support for NAND on QCOM SoCs" + depends on ARCH_QCOM + help + Enables support for NAND flash chips on SoCs containing the EBI2 NAND + controller. This controller is found on IPQ806x SoC. + endif # MTD_NAND diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 9e3623308509..f55335373f7c 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -56,5 +56,6 @@ obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ +obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index bddcf83d6859..20cbaabb2959 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -65,6 +65,11 @@ module_param(on_flash_bbt, int, 0); struct atmel_nand_caps { bool pmecc_correct_erase_page; + uint8_t pmecc_max_correction; +}; + +struct atmel_nand_nfc_caps { + uint32_t rb_mask; }; /* oob layout for large page size @@ -111,6 +116,7 @@ struct atmel_nfc { /* Point to the sram bank which include readed data via NFC */ void *data_in_sram; bool will_write_sram; + const struct atmel_nand_nfc_caps *caps; }; static struct atmel_nfc nand_nfc; @@ -140,6 +146,7 @@ struct atmel_nand_host { int pmecc_cw_len; /* Length of codeword */ void __iomem *pmerrloc_base; + void __iomem *pmerrloc_el_base; void __iomem *pmecc_rom_base; /* lookup table for alpha_to and index_of */ @@ -468,6 +475,7 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) * 8-bits 13-bytes 14-bytes * 12-bits 20-bytes 21-bytes * 24-bits 39-bytes 42-bytes + * 32-bits 52-bytes 56-bytes */ static int pmecc_get_ecc_bytes(int cap, int sector_size) { @@ -813,7 +821,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, sector_size = host->pmecc_sector_size; while (err_nbr) { - tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1; + tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1; byte_pos = tmp / 8; bit_pos = tmp % 8; @@ -825,7 +833,7 @@ static void 
pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, *(buf + byte_pos) ^= (1 << bit_pos); pos = sector_num * host->pmecc_sector_size + byte_pos; - dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", + dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", pos, bit_pos, err_byte, *(buf + byte_pos)); } else { /* Bit flip in OOB area */ @@ -835,7 +843,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, ecc[tmp] ^= (1 << bit_pos); pos = tmp + nand_chip->ecc.layout->eccpos[0]; - dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", + dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", pos, bit_pos, err_byte, ecc[tmp]); } @@ -1017,6 +1025,9 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd) case 24: val = PMECC_CFG_BCH_ERR24; break; + case 32: + val = PMECC_CFG_BCH_ERR32; + break; } if (host->pmecc_sector_size == 512) @@ -1078,6 +1089,9 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host, /* If device tree doesn't specify, use NAND's minimum ECC parameters */ if (host->pmecc_corr_cap == 0) { + if (*cap > host->caps->pmecc_max_correction) + return -EINVAL; + /* use the most fitable ecc bits (the near bigger one ) */ if (*cap <= 2) host->pmecc_corr_cap = 2; @@ -1089,6 +1103,8 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host, host->pmecc_corr_cap = 12; else if (*cap <= 24) host->pmecc_corr_cap = 24; + else if (*cap <= 32) + host->pmecc_corr_cap = 32; else return -EINVAL; } @@ -1205,6 +1221,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, err_no = PTR_ERR(host->pmerrloc_base); goto err; } + host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx + + (host->caps->pmecc_max_correction + 1) * 4; if (!host->has_no_lookup_table) { regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3); @@ -1486,8 +1504,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) ecc_writel(host->ecc, CR, ATMEL_ECC_RST); } -static const struct of_device_id atmel_nand_dt_ids[]; - static int atmel_of_init_port(struct atmel_nand_host *host, struct device_node *np) { @@ -1498,7 +1514,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host, enum of_gpio_flags flags = 0; host->caps = (struct atmel_nand_caps *) - of_match_device(atmel_nand_dt_ids, host->dev)->data; + of_device_get_match_data(host->dev); if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { if (val >= 32) { @@ -1547,10 +1563,16 @@ static int atmel_of_init_port(struct atmel_nand_host *host, * them from NAND ONFI parameters. 
*/ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) { - if ((val != 2) && (val != 4) && (val != 8) && (val != 12) && - (val != 24)) { + if (val > host->caps->pmecc_max_correction) { dev_err(host->dev, - "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n", + "Required ECC strength too high: %u max %u\n", + val, host->caps->pmecc_max_correction); + return -EINVAL; + } + if ((val != 2) && (val != 4) && (val != 8) && + (val != 12) && (val != 24) && (val != 32)) { + dev_err(host->dev, + "Required ECC strength not supported: %u\n", val); return -EINVAL; } @@ -1560,7 +1582,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host, if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) { if ((val != 512) && (val != 1024)) { dev_err(host->dev, - "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n", + "Required ECC sector size not supported: %u\n", val); return -EINVAL; } @@ -1677,9 +1699,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id) nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); ret = IRQ_HANDLED; } - if (pending & NFC_SR_RB_EDGE) { + if (pending & host->nfc->caps->rb_mask) { complete(&host->nfc->comp_ready); - nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); + nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask); ret = IRQ_HANDLED; } if (pending & NFC_SR_CMD_DONE) { @@ -1697,7 +1719,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag) if (flag & NFC_SR_XFR_DONE) init_completion(&host->nfc->comp_xfer_done); - if (flag & NFC_SR_RB_EDGE) + if (flag & host->nfc->caps->rb_mask) init_completion(&host->nfc->comp_ready); if (flag & NFC_SR_CMD_DONE) @@ -1715,7 +1737,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) if (flag & NFC_SR_XFR_DONE) comp[index++] = &host->nfc->comp_xfer_done; - if (flag & NFC_SR_RB_EDGE) + if (flag & host->nfc->caps->rb_mask) comp[index++] = &host->nfc->comp_ready; if (flag & NFC_SR_CMD_DONE) @@ -1783,7 +1805,7 @@ static int nfc_device_ready(struct mtd_info *mtd) dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", mask & status); - return status & NFC_SR_RB_EDGE; + return status & host->nfc->caps->rb_mask; } static void nfc_select_chip(struct mtd_info *mtd, int chip) @@ -1956,8 +1978,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command, } /* fall through */ default: - nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); - nfc_wait_interrupt(host, NFC_SR_RB_EDGE); + nfc_prepare_interrupt(host, host->nfc->caps->rb_mask); + nfc_wait_interrupt(host, host->nfc->caps->rb_mask); } } @@ -2304,17 +2326,34 @@ static int atmel_nand_remove(struct platform_device *pdev) return 0; } +/* + * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for + * BCH ECC. Combined with the "atmel,has-pmecc", it is used to describe + * devices from the SAM9 family that have those. + */ static const struct atmel_nand_caps at91rm9200_caps = { .pmecc_correct_erase_page = false, + .pmecc_max_correction = 24, }; static const struct atmel_nand_caps sama5d4_caps = { .pmecc_correct_erase_page = true, + .pmecc_max_correction = 24, +}; + +/* + * The PMECC Errloc controller starting in SAMA5D2 is not compatible, + * as the increased correction strength requires more registers. 
+ */ +static const struct atmel_nand_caps sama5d2_caps = { + .pmecc_correct_erase_page = true, + .pmecc_max_correction = 32, }; static const struct of_device_id atmel_nand_dt_ids[] = { { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps }, { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps }, + { .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps }, { /* sentinel */ } }; @@ -2354,6 +2393,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev) } } + nfc->caps = (const struct atmel_nand_nfc_caps *) + of_device_get_match_data(&pdev->dev); + if (!nfc->caps) + return -ENODEV; + nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ @@ -2382,8 +2426,17 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev) return 0; } +static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = { + .rb_mask = NFC_SR_RB_EDGE0, +}; + +static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = { + .rb_mask = NFC_SR_RB_EDGE3, +}; + static const struct of_device_id atmel_nand_nfc_match[] = { - { .compatible = "atmel,sama5d3-nfc" }, + { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps }, + { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match); diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h index 668e7358f19b..834d694487bd 100644 --- a/drivers/mtd/nand/atmel_nand_ecc.h +++ b/drivers/mtd/nand/atmel_nand_ecc.h @@ -43,6 +43,7 @@ #define PMECC_CFG_BCH_ERR8 (2 << 0) #define PMECC_CFG_BCH_ERR12 (3 << 0) #define PMECC_CFG_BCH_ERR24 (4 << 0) +#define PMECC_CFG_BCH_ERR32 (5 << 0) #define PMECC_CFG_SECTOR512 (0 << 4) #define PMECC_CFG_SECTOR1024 (1 << 4) @@ -108,7 +109,11 @@ #define PMERRLOC_ERR_NUM_MASK (0x1f << 8) #define PMERRLOC_CALC_DONE (1 << 0) #define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */ -#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */ + +/* + * The ATMEL_PMERRLOC_ELx register location depends from the number of + * bits corrected by the PMECC controller. Do not use it. 
+ */ /* Register access macros for PMECC */ #define pmecc_readl_relaxed(addr, reg) \ @@ -136,7 +141,7 @@ readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4)) #define pmerrloc_readl_el_relaxed(addr, n) \ - readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4)) + readl_relaxed((addr) + ((n) * 4)) /* Galois field dimension */ #define PMECC_GF_DIMENSION_13 13 diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h index 4d5d26221a7e..0bbc1fa97dba 100644 --- a/drivers/mtd/nand/atmel_nand_nfc.h +++ b/drivers/mtd/nand/atmel_nand_nfc.h @@ -42,7 +42,8 @@ #define NFC_SR_UNDEF (1 << 21) #define NFC_SR_AWB (1 << 22) #define NFC_SR_ASE (1 << 23) -#define NFC_SR_RB_EDGE (1 << 24) +#define NFC_SR_RB_EDGE0 (1 << 24) +#define NFC_SR_RB_EDGE3 (1 << 27) #define ATMEL_HSMC_NFC_IER 0x0c #define ATMEL_HSMC_NFC_IDR 0x10 diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 844fc07d22cd..e0528397306a 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = { [BRCMNAND_FC_BASE] = 0x400, }; +/* BRCMNAND v7.1 */ +static const u16 brcmnand_regs_v71[] = { + [BRCMNAND_CMD_START] = 0x04, + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, + [BRCMNAND_CMD_ADDRESS] = 0x0c, + [BRCMNAND_INTFC_STATUS] = 0x14, + [BRCMNAND_CS_SELECT] = 0x18, + [BRCMNAND_CS_XOR] = 0x1c, + [BRCMNAND_LL_OP] = 0x20, + [BRCMNAND_CS0_BASE] = 0x50, + [BRCMNAND_CS1_BASE] = 0, + [BRCMNAND_CORR_THRESHOLD] = 0xdc, + [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, + [BRCMNAND_UNCORR_COUNT] = 0xfc, + [BRCMNAND_CORR_COUNT] = 0x100, + [BRCMNAND_CORR_EXT_ADDR] = 0x10c, + [BRCMNAND_CORR_ADDR] = 0x110, + [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, + [BRCMNAND_UNCORR_ADDR] = 0x118, + [BRCMNAND_SEMAPHORE] = 0x150, + [BRCMNAND_ID] = 0x194, + [BRCMNAND_ID_EXT] = 0x198, + [BRCMNAND_LL_RDATA] = 0x19c, + [BRCMNAND_OOB_READ_BASE] = 0x200, + [BRCMNAND_OOB_READ_10_BASE] = 0, + [BRCMNAND_OOB_WRITE_BASE] = 0x280, + [BRCMNAND_OOB_WRITE_10_BASE] = 0, + [BRCMNAND_FC_BASE] = 0x400, +}; + enum brcmnand_cs_reg { BRCMNAND_CS_CFG_EXT = 0, BRCMNAND_CS_CFG, @@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } /* Register offsets */ - if (ctrl->nand_version >= 0x0600) + if (ctrl->nand_version >= 0x0701) + ctrl->reg_offsets = brcmnand_regs_v71; + else if (ctrl->nand_version >= 0x0600) ctrl->reg_offsets = brcmnand_regs_v60; else if (ctrl->nand_version >= 0x0500) ctrl->reg_offsets = brcmnand_regs_v50; @@ -796,7 +828,8 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level, idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) break; } - goto out; + + return layout; } /* @@ -847,10 +880,7 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level, idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1) break; } -out: - /* Sum available OOB */ - for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++) - layout->oobavail += layout->oobfree[i].length; + return layout; } diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index aa1a616b9fb6..e553aff68987 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -537,7 +537,7 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd, return 0; } -static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs) { return 0; } diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index f170f3c31b34..547c1002941d 100644 --- 
a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -794,7 +794,7 @@ static int doc200x_dev_ready(struct mtd_info *mtd) } } -static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs) { /* This is our last resort if we couldn't find or create a BBT. Just pretend all blocks are good. */ diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index df4165b02c62..d86a60e1bbcb 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -225,7 +225,6 @@ struct docg4_priv { static struct nand_ecclayout docg4_oobinfo = { .eccbytes = 9, .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, - .oobavail = 5, .oobfree = { {.offset = 2, .length = 5} } }; @@ -1121,7 +1120,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs) return ret; } -static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip) +static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs) { /* only called when module_param ignore_badblocks is set */ return 0; diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 235ddcb58f39..8122c699ccf2 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -1,7 +1,7 @@ /* * Freescale GPMI NAND Flash Driver * - * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. + * Copyright (C) 2010-2015 Freescale Semiconductor, Inc. * Copyright (C) 2008 Embedded Alley Solutions, Inc. * * This program is free software; you can redistribute it and/or modify @@ -136,7 +136,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this) * * We may have available oob space in this case. */ -static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this) +static int set_geometry_by_ecc_info(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; struct nand_chip *chip = &this->nand; @@ -145,7 +145,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this) unsigned int block_mark_bit_offset; if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) - return false; + return -EINVAL; switch (chip->ecc_step_ds) { case SZ_512: @@ -158,19 +158,19 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this) dev_err(this->dev, "unsupported nand chip. ecc bits : %d, ecc size : %d\n", chip->ecc_strength_ds, chip->ecc_step_ds); - return false; + return -EINVAL; } geo->ecc_chunk_size = chip->ecc_step_ds; geo->ecc_strength = round_up(chip->ecc_strength_ds, 2); if (!gpmi_check_ecc(this)) - return false; + return -EINVAL; /* Keep the C >= O */ if (geo->ecc_chunk_size < mtd->oobsize) { dev_err(this->dev, "unsupported nand chip. ecc size: %d, oob size : %d\n", chip->ecc_step_ds, mtd->oobsize); - return false; + return -EINVAL; } /* The default value, see comment in the legacy_set_geometry(). */ @@ -242,7 +242,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this) + ALIGN(geo->ecc_chunk_count, 4); if (!this->swap_block_mark) - return true; + return 0; /* For bit swap. 
*/ block_mark_bit_offset = mtd->writesize * 8 - @@ -251,7 +251,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this) geo->block_mark_byte_offset = block_mark_bit_offset / 8; geo->block_mark_bit_offset = block_mark_bit_offset % 8; - return true; + return 0; } static int legacy_set_geometry(struct gpmi_nand_data *this) @@ -285,7 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this) geo->ecc_strength = get_ecc_strength(this); if (!gpmi_check_ecc(this)) { dev_err(this->dev, - "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n", + "ecc strength: %d cannot be supported by the controller (%d)\n" + "try to use minimum ecc strength that NAND chip required\n", geo->ecc_strength, this->devdata->bch_max_ecc_strength); return -EINVAL; @@ -366,10 +367,11 @@ static int legacy_set_geometry(struct gpmi_nand_data *this) int common_nfc_set_geometry(struct gpmi_nand_data *this) { - if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc") - && set_geometry_by_ecc_info(this)) - return 0; - return legacy_set_geometry(this); + if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")) + || legacy_set_geometry(this)) + return set_geometry_by_ecc_info(this); + + return 0; } struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) @@ -2033,9 +2035,54 @@ static int gpmi_nand_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int gpmi_pm_suspend(struct device *dev) +{ + struct gpmi_nand_data *this = dev_get_drvdata(dev); + + release_dma_channels(this); + return 0; +} + +static int gpmi_pm_resume(struct device *dev) +{ + struct gpmi_nand_data *this = dev_get_drvdata(dev); + int ret; + + ret = acquire_dma_channels(this); + if (ret < 0) + return ret; + + /* re-init the GPMI registers */ + this->flags &= ~GPMI_TIMING_INIT_OK; + ret = gpmi_init(this); + if (ret) { + dev_err(this->dev, "Error setting GPMI : %d\n", ret); + return ret; + } + + /* re-init the BCH registers */ + ret = bch_set_geometry(this); + if (ret) { + dev_err(this->dev, "Error setting BCH : %d\n", ret); + return ret; + } + + /* re-init others */ + gpmi_extra_init(this); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops gpmi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume) +}; + static struct platform_driver gpmi_nand_driver = { .driver = { .name = "gpmi-nand", + .pm = &gpmi_pm_ops, .of_match_table = gpmi_nand_id_table, }, .probe = gpmi_nand_probe, diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index f8d37f36a81c..96502b624cfb 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -632,7 +632,6 @@ static void hisi_nfc_host_init(struct hinfc_host *host) } static struct nand_ecclayout nand_ecc_2K_16bits = { - .oobavail = 6, .oobfree = { {2, 6} }, }; diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index b19d2a9a5eb9..673ceb2a0b44 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -427,9 +427,6 @@ static int jz_nand_probe(struct platform_device *pdev) chip->ecc.strength = 4; chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; - if (pdata) - chip->ecc.layout = pdata->ecc_layout; - chip->chip_delay = 50; chip->cmd_ctrl = jz_nand_cmd_ctrl; chip->select_chip = jz_nand_select_chip; diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 9bc435d72a86..d8c3e7afcc0b 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ 
b/drivers/mtd/nand/lpc32xx_mlc.c @@ -750,7 +750,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) } nand_chip->ecc.mode = NAND_ECC_HW; - nand_chip->ecc.size = mtd->writesize; + nand_chip->ecc.size = 512; nand_chip->ecc.layout = &lpc32xx_nand_oob; host->mlcsubpages = mtd->writesize / 512; diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index 6b93e899d4e9..5d7843ffff6a 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) static int mpc5121_nfc_probe(struct platform_device *op) { - struct device_node *rootnode, *dn = op->dev.of_node; + struct device_node *dn = op->dev.of_node; struct clk *clk; struct device *dev = &op->dev; struct mpc5121_nfc_prv *prv; @@ -712,18 +712,15 @@ static int mpc5121_nfc_probe(struct platform_device *op) chip->ecc.mode = NAND_ECC_SOFT; /* Support external chip-select logic on ADS5121 board */ - rootnode = of_find_node_by_path("/"); - if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) { + if (of_machine_is_compatible("fsl,mpc5121ads")) { retval = ads5121_chipselect_init(mtd); if (retval) { dev_err(dev, "Chipselect init error!\n"); - of_node_put(rootnode); return retval; } chip->select_chip = ads5121_select_chip; } - of_node_put(rootnode); /* Enable NFC clock */ clk = devm_clk_get(dev, "ipg"); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index f2c8ff398d6c..b6facac54fc0 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -313,13 +313,12 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) * nand_block_bad - [DEFAULT] Read bad block marker from the chip * @mtd: MTD device structure * @ofs: offset from device start - * @getchip: 0, if the chip is already selected * * Check, if the block is bad. */ -static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs) { - int page, chipnr, res = 0, i = 0; + int page, res = 0, i = 0; struct nand_chip *chip = mtd_to_nand(mtd); u16 bad; @@ -328,15 +327,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) page = (int)(ofs >> chip->page_shift) & chip->pagemask; - if (getchip) { - chipnr = (int)(ofs >> chip->chip_shift); - - nand_get_device(mtd, FL_READING); - - /* Select the NAND device */ - chip->select_chip(mtd, chipnr); - } - do { if (chip->options & NAND_BUSWIDTH_16) { chip->cmdfunc(mtd, NAND_CMD_READOOB, @@ -361,11 +351,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) i++; } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); - if (getchip) { - chip->select_chip(mtd, -1); - nand_release_device(mtd); - } - return res; } @@ -503,19 +488,17 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs) * nand_block_checkbad - [GENERIC] Check if a block is marked bad * @mtd: MTD device structure * @ofs: offset from device start - * @getchip: 0, if the chip is already selected * @allowbbt: 1, if its allowed to access the bbt area * * Check, if the block is bad. Either by reading the bad block table or * calling of the scan function. 
*/ -static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, - int allowbbt) +static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt) { struct nand_chip *chip = mtd_to_nand(mtd); if (!chip->bbt) - return chip->block_bad(mtd, ofs, getchip); + return chip->block_bad(mtd, ofs); /* Return info from the table */ return nand_isbad_bbt(mtd, ofs, allowbbt); @@ -566,8 +549,8 @@ void nand_wait_ready(struct mtd_info *mtd) cond_resched(); } while (time_before(jiffies, timeo)); - pr_warn_ratelimited( - "timeout while waiting for chip to become ready\n"); + if (!chip->dev_ready(mtd)) + pr_warn_ratelimited("timeout while waiting for chip to become ready\n"); out: led_trigger_event(nand_led_trigger, LED_OFF); } @@ -1723,8 +1706,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, int ret = 0; uint32_t readlen = ops->len; uint32_t oobreadlen = ops->ooblen; - uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ? - mtd->oobavail : mtd->oobsize; + uint32_t max_oobsize = mtd_oobavail(mtd, ops); uint8_t *bufpoi, *oob, *buf; int use_bufpoi; @@ -2075,10 +2057,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, stats = mtd->ecc_stats; - if (ops->mode == MTD_OPS_AUTO_OOB) - len = chip->ecc.layout->oobavail; - else - len = mtd->oobsize; + len = mtd_oobavail(mtd, ops); if (unlikely(ops->ooboffs >= len)) { pr_debug("%s: attempt to start read outside oob\n", @@ -2575,8 +2554,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, uint32_t writelen = ops->len; uint32_t oobwritelen = ops->ooblen; - uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ? - mtd->oobavail : mtd->oobsize; + uint32_t oobmaxlen = mtd_oobavail(mtd, ops); uint8_t *oob = ops->oobbuf; uint8_t *buf = ops->datbuf; @@ -2766,10 +2744,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, (int)ops->ooblen); - if (ops->mode == MTD_OPS_AUTO_OOB) - len = chip->ecc.layout->oobavail; - else - len = mtd->oobsize; + len = mtd_oobavail(mtd, ops); /* Do not allow write past end of page */ if ((ops->ooboffs + ops->ooblen) > len) { @@ -2957,7 +2932,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, while (len) { /* Check if we have a bad block, we do not erase bad blocks! */ if (nand_block_checkbad(mtd, ((loff_t) page) << - chip->page_shift, 0, allowbbt)) { + chip->page_shift, allowbbt)) { pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", __func__, page); instr->state = MTD_ERASE_FAILED; @@ -3044,7 +3019,20 @@ static void nand_sync(struct mtd_info *mtd) */ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) { - return nand_block_checkbad(mtd, offs, 1, 0); + struct nand_chip *chip = mtd_to_nand(mtd); + int chipnr = (int)(offs >> chip->chip_shift); + int ret; + + /* Select the NAND device */ + nand_get_device(mtd, FL_READING); + chip->select_chip(mtd, chipnr); + + ret = nand_block_checkbad(mtd, offs, 0); + + chip->select_chip(mtd, -1); + nand_release_device(mtd); + + return ret; } /** @@ -4287,10 +4275,8 @@ int nand_scan_tail(struct mtd_info *mtd) } /* See nand_bch_init() for details. 
*/ - ecc->bytes = DIV_ROUND_UP( - ecc->strength * fls(8 * ecc->size), 8); - ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes, - &ecc->layout); + ecc->bytes = 0; + ecc->priv = nand_bch_init(mtd); if (!ecc->priv) { pr_warn("BCH ECC initialization failed!\n"); BUG(); @@ -4325,11 +4311,11 @@ int nand_scan_tail(struct mtd_info *mtd) * The number of bytes available for a client to place data into * the out of band area. */ - ecc->layout->oobavail = 0; - for (i = 0; ecc->layout->oobfree[i].length - && i < ARRAY_SIZE(ecc->layout->oobfree); i++) - ecc->layout->oobavail += ecc->layout->oobfree[i].length; - mtd->oobavail = ecc->layout->oobavail; + mtd->oobavail = 0; + if (ecc->layout) { + for (i = 0; ecc->layout->oobfree[i].length; i++) + mtd->oobavail += ecc->layout->oobfree[i].length; + } /* ECC sanity check: warn if it's too weak */ if (!nand_ecc_strength_good(mtd)) diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 4b6a7085b442..2fbb523df066 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs) return ret; } - -EXPORT_SYMBOL(nand_scan_bbt); diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c index a87c1b628dfc..b585bae37929 100644 --- a/drivers/mtd/nand/nand_bch.c +++ b/drivers/mtd/nand/nand_bch.c @@ -107,9 +107,6 @@ EXPORT_SYMBOL(nand_bch_correct_data); /** * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction * @mtd: MTD block structure - * @eccsize: ecc block size in bytes - * @eccbytes: ecc length in bytes - * @ecclayout: output default layout * * Returns: * a pointer to a new NAND BCH control structure, or NULL upon failure @@ -123,14 +120,21 @@ EXPORT_SYMBOL(nand_bch_correct_data); * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits) */ -struct nand_bch_control * -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, - struct nand_ecclayout **ecclayout) +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) { + struct nand_chip *nand = mtd_to_nand(mtd); unsigned int m, t, eccsteps, i; - struct nand_ecclayout *layout; + struct nand_ecclayout *layout = nand->ecc.layout; struct nand_bch_control *nbc = NULL; unsigned char *erased_page; + unsigned int eccsize = nand->ecc.size; + unsigned int eccbytes = nand->ecc.bytes; + unsigned int eccstrength = nand->ecc.strength; + + if (!eccbytes && eccstrength) { + eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8); + nand->ecc.bytes = eccbytes; + } if (!eccsize || !eccbytes) { printk(KERN_WARNING "ecc parameters not supplied\n"); @@ -158,7 +162,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, eccsteps = mtd->writesize/eccsize; /* if no ecc placement scheme was provided, build one */ - if (!*ecclayout) { + if (!layout) { /* handle large page devices only */ if (mtd->oobsize < 64) { @@ -184,7 +188,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, layout->oobfree[0].offset = 2; layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; - *ecclayout = layout; + nand->ecc.layout = layout; } /* sanity checks */ @@ -192,7 +196,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, printk(KERN_WARNING "eccsize %u is too large\n", eccsize); goto fail; } - if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) { + if (layout->eccbytes != (eccsteps*eccbytes)) { 
printk(KERN_WARNING "invalid ecc layout\n"); goto fail; } @@ -216,6 +220,9 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, for (i = 0; i < eccbytes; i++) nbc->eccmask[i] ^= 0xff; + if (!eccstrength) + nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize); + return nbc; fail: nand_bch_free(nbc); diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index a8804a3da076..ccc05f5b2695 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] = { SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) }, {"H27UCG8T2ATR-BC 64G 3.3V 8-bit", { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} }, - SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K), - 4 }, + SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640, + NAND_ECC_INFO(40, SZ_1K), 4 }, LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 220ddfcf29f5..dbc5b571c2bb 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -113,7 +113,7 @@ static int nuc900_check_rb(struct nuc900_nand *nand) { unsigned int val; spin_lock(&nand->lock); - val = __raw_readl(REG_SMISR); + val = __raw_readl(nand->reg + REG_SMISR); val &= READYBUSY; spin_unlock(&nand->lock); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index c553f78ab83f..0749ca1a1456 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1807,13 +1807,19 @@ static int omap_nand_probe(struct platform_device *pdev) goto return_error; } + /* + * Bail out earlier to let NAND_ECC_SOFT code create its own + * ecclayout instead of using ours. 
+ */
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ goto scan_tail;
+ }
+
 /* populate MTD interface based on ECC scheme */
 ecclayout = &info->oobinfo;
+ nand_chip->ecc.layout = ecclayout;
 switch (info->ecc_opt) {
- case OMAP_ECC_HAM1_CODE_SW:
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- break;
-
 case OMAP_ECC_HAM1_CODE_HW:
 pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
 nand_chip->ecc.mode = NAND_ECC_HW;
@@ -1861,10 +1867,7 @@ static int omap_nand_probe(struct platform_device *pdev)
 ecclayout->oobfree->offset = 1 +
 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 /* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd,
- nand_chip->ecc.size,
- nand_chip->ecc.bytes,
- &ecclayout);
+ nand_chip->ecc.priv = nand_bch_init(mtd);
 if (!nand_chip->ecc.priv) {
 dev_err(&info->pdev->dev, "unable to use BCH library\n");
 err = -EINVAL;
@@ -1925,10 +1928,7 @@ static int omap_nand_probe(struct platform_device *pdev)
 ecclayout->oobfree->offset = 1 +
 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 /* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd,
- nand_chip->ecc.size,
- nand_chip->ecc.bytes,
- &ecclayout);
+ nand_chip->ecc.priv = nand_bch_init(mtd);
 if (!nand_chip->ecc.priv) {
 dev_err(&info->pdev->dev, "unable to use BCH library\n");
 err = -EINVAL;
@@ -2002,9 +2002,6 @@ static int omap_nand_probe(struct platform_device *pdev)
 goto return_error;
 }

- if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW)
- goto scan_tail;
-
 /* all OOB bytes from oobfree->offset till end of OOB are free */
 ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
 /* check if NAND device's OOB is enough to store ECC signatures */
@@ -2015,7 +2012,6 @@ static int omap_nand_probe(struct platform_device *pdev)
 err = -EINVAL;
 goto return_error;
 }
- nand_chip->ecc.layout = ecclayout;

 scan_tail:
 /* second phase scan */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a0e26dea1424..e4e50da30444 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -73,7 +73,6 @@ static int plat_nand_probe(struct platform_device *pdev)
 data->chip.bbt_options |= pdata->chip.bbt_options;

 data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
- data->chip.ecc.layout = pdata->chip.ecclayout;
 data->chip.ecc.mode = NAND_ECC_SOFT;

 platform_set_drvdata(pdev, data);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 86fc245dc71a..d6508856da99 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -131,11 +131,23 @@
 #define READ_ID_BYTES 7

 /* macros for registers read/write */
-#define nand_writel(info, off, val) \
- writel_relaxed((val), (info)->mmio_base + (off))
-
-#define nand_readl(info, off) \
- readl_relaxed((info)->mmio_base + (off))
+#define nand_writel(info, off, val) \
+ do { \
+ dev_vdbg(&info->pdev->dev, \
+ "%s():%d nand_writel(0x%x, 0x%04x)\n", \
+ __func__, __LINE__, (val), (off)); \
+ writel_relaxed((val), (info)->mmio_base + (off)); \
+ } while (0)
+
+#define nand_readl(info, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((info)->mmio_base + (off)); \
+ dev_vdbg(&info->pdev->dev, \
+ "%s():%d nand_readl(0x%04x) = 0x%x\n", \
+ __func__, __LINE__, (off), _v); \
+ _v; \
+ })

 /* error code and state */
 enum {
@@ -199,7 +211,6 @@ struct pxa3xx_nand_info {
 struct dma_chan *dma_chan;
 dma_cookie_t dma_cookie;
 int drcmr_dat;
- int drcmr_cmd;

 unsigned char *data_buff;
 unsigned char *oob_buff;
@@ -222,15 +233,44
@@ struct pxa3xx_nand_info {
 int use_spare; /* use spare ? */
 int need_wait;

- unsigned int data_size; /* data to be read from FIFO */
- unsigned int chunk_size; /* split commands chunk size */
- unsigned int oob_size;
+ /* Amount of real data per full chunk */
+ unsigned int chunk_size;
+
+ /* Amount of spare data per full chunk */
 unsigned int spare_size;
+
+ /* Number of full chunks (i.e. chunk_size + spare_size) */
+ unsigned int nfullchunks;
+
+ /*
+ * Total number of chunks. If equal to nfullchunks, then there
+ * are only full chunks. Otherwise, there is one last chunk of
+ * size (last_chunk_size + last_spare_size)
+ */
+ unsigned int ntotalchunks;
+
+ /* Amount of real data in the last chunk */
+ unsigned int last_chunk_size;
+
+ /* Amount of spare data in the last chunk */
+ unsigned int last_spare_size;
+
 unsigned int ecc_size;
 unsigned int ecc_err_cnt;
 unsigned int max_bitflips;
 int retcode;
+ /*
+ * Variables only valid during command
+ * execution. step_chunk_size and step_spare_size are the
+ * amounts of real data and spare data in the current
+ * chunk. cur_chunk is the current chunk being
+ * read/programmed.
+ */
+ unsigned int step_chunk_size;
+ unsigned int step_spare_size;
+ unsigned int cur_chunk;
+
 /* cached register value */
 uint32_t reg_ndcr;
 uint32_t ndtr0cs0;
@@ -526,25 +566,6 @@ static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
 return 0;
 }

-/*
- * Set the data and OOB size, depending on the selected
- * spare and ECC configuration.
- * Only applicable to READ0, READOOB and PAGEPROG commands.
- */
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
- struct mtd_info *mtd)
-{
- int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
-
- info->data_size = mtd->writesize;
- if (!oob_enable)
- return;
-
- info->oob_size = info->spare_size;
- if (!info->use_ecc)
- info->oob_size += info->ecc_size;
-}
-
 /**
 * NOTE: ND_RUN must be set first, then the command buffer
 * written; otherwise, it does not work.
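To make the new chunk accounting concrete: with the strength == 8, page_size == 4096 geometry configured later in pxa_ecc_init() (nfullchunks = 4, ntotalchunks = 5, chunk_size = 1024, spare_size = 0, last_chunk_size = 0, last_spare_size = 64), a page is transferred in four full 1024-byte data chunks plus one trailing spare-only chunk carrying the 64 OOB bytes. A minimal sketch of how each step picks its transfer sizes, mirroring the cur_chunk checks added to prepare_set_command() below (the helper name step_sizes is illustrative, not part of the patch):

	/* illustrative only: how step_chunk_size/step_spare_size are chosen */
	static void step_sizes(const struct pxa3xx_nand_info *info,
			       unsigned int *data, unsigned int *spare)
	{
		if (info->cur_chunk < info->nfullchunks) {
			*data = info->chunk_size;	/* e.g. 1024 data bytes */
			*spare = info->spare_size;	/* e.g. 0 spare bytes */
		} else {
			*data = info->last_chunk_size;	/* e.g. 0: spare-only */
			*spare = info->last_spare_size;	/* e.g. 64 OOB bytes */
		}
	}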
@@ -660,28 +681,28 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) static void handle_data_pio(struct pxa3xx_nand_info *info) { - unsigned int do_bytes = min(info->data_size, info->chunk_size); - switch (info->state) { case STATE_PIO_WRITING: - writesl(info->mmio_base + NDDB, - info->data_buff + info->data_buff_pos, - DIV_ROUND_UP(do_bytes, 4)); + if (info->step_chunk_size) + writesl(info->mmio_base + NDDB, + info->data_buff + info->data_buff_pos, + DIV_ROUND_UP(info->step_chunk_size, 4)); - if (info->oob_size > 0) + if (info->step_spare_size) writesl(info->mmio_base + NDDB, info->oob_buff + info->oob_buff_pos, - DIV_ROUND_UP(info->oob_size, 4)); + DIV_ROUND_UP(info->step_spare_size, 4)); break; case STATE_PIO_READING: - drain_fifo(info, - info->data_buff + info->data_buff_pos, - DIV_ROUND_UP(do_bytes, 4)); + if (info->step_chunk_size) + drain_fifo(info, + info->data_buff + info->data_buff_pos, + DIV_ROUND_UP(info->step_chunk_size, 4)); - if (info->oob_size > 0) + if (info->step_spare_size) drain_fifo(info, info->oob_buff + info->oob_buff_pos, - DIV_ROUND_UP(info->oob_size, 4)); + DIV_ROUND_UP(info->step_spare_size, 4)); break; default: dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, @@ -690,9 +711,8 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) } /* Update buffer pointers for multi-page read/write */ - info->data_buff_pos += do_bytes; - info->oob_buff_pos += info->oob_size; - info->data_size -= do_bytes; + info->data_buff_pos += info->step_chunk_size; + info->oob_buff_pos += info->step_spare_size; } static void pxa3xx_nand_data_dma_irq(void *data) @@ -733,8 +753,9 @@ static void start_data_dma(struct pxa3xx_nand_info *info) info->state); BUG(); } - info->sg.length = info->data_size + - (info->oob_size ? 
info->spare_size + info->ecc_size : 0); + info->sg.length = info->chunk_size; + if (info->use_spare) + info->sg.length += info->spare_size + info->ecc_size; dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir); tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction, @@ -895,9 +916,11 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command) /* reset data and oob column point to handle data */ info->buf_start = 0; info->buf_count = 0; - info->oob_size = 0; info->data_buff_pos = 0; info->oob_buff_pos = 0; + info->step_chunk_size = 0; + info->step_spare_size = 0; + info->cur_chunk = 0; info->use_ecc = 0; info->use_spare = 1; info->retcode = ERR_NONE; @@ -909,8 +932,6 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command) case NAND_CMD_READ0: case NAND_CMD_PAGEPROG: info->use_ecc = 1; - case NAND_CMD_READOOB: - pxa3xx_set_datasize(info, mtd); break; case NAND_CMD_PARAM: info->use_spare = 0; @@ -969,6 +990,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command, if (command == NAND_CMD_READOOB) info->buf_start += mtd->writesize; + if (info->cur_chunk < info->nfullchunks) { + info->step_chunk_size = info->chunk_size; + info->step_spare_size = info->spare_size; + } else { + info->step_chunk_size = info->last_chunk_size; + info->step_spare_size = info->last_spare_size; + } + /* * Multiple page read needs an 'extended command type' field, * which is either naked-read or last-read according to the @@ -980,8 +1009,8 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command, info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) | NDCB0_LEN_OVRD | NDCB0_EXT_CMD_TYPE(ext_cmd_type); - info->ndcb3 = info->chunk_size + - info->oob_size; + info->ndcb3 = info->step_chunk_size + + info->step_spare_size; } set_command_address(info, mtd->writesize, column, page_addr); @@ -1001,8 +1030,6 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command, | NDCB0_EXT_CMD_TYPE(ext_cmd_type) | addr_cycle | command; - /* No data transfer in this case */ - info->data_size = 0; exec_cmd = 1; } break; @@ -1014,6 +1041,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command, break; } + if (info->cur_chunk < info->nfullchunks) { + info->step_chunk_size = info->chunk_size; + info->step_spare_size = info->spare_size; + } else { + info->step_chunk_size = info->last_chunk_size; + info->step_spare_size = info->last_spare_size; + } + /* Second command setting for large pages */ if (mtd->writesize > PAGE_CHUNK_SIZE) { /* @@ -1024,14 +1059,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command, info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | NDCB0_LEN_OVRD | NDCB0_EXT_CMD_TYPE(ext_cmd_type); - info->ndcb3 = info->chunk_size + - info->oob_size; + info->ndcb3 = info->step_chunk_size + + info->step_spare_size; /* * This is the command dispatch that completes a chunked * page program operation. 
 */
- if (info->data_size == 0) {
+ if (info->cur_chunk == info->ntotalchunks) {
 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
 | command;
@@ -1058,7 +1093,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 | command;
 info->ndcb1 = (column & 0xFF);
 info->ndcb3 = INIT_BUFFER_SIZE;
- info->data_size = INIT_BUFFER_SIZE;
+ info->step_chunk_size = INIT_BUFFER_SIZE;
 break;

 case NAND_CMD_READID:
@@ -1068,7 +1103,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 | command;
 info->ndcb1 = (column & 0xFF);

- info->data_size = 8;
+ info->step_chunk_size = 8;
 break;
 case NAND_CMD_STATUS:
 info->buf_count = 1;
@@ -1076,7 +1111,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 | NDCB0_ADDR_CYC(1)
 | command;

- info->data_size = 8;
+ info->step_chunk_size = 8;
 break;

 case NAND_CMD_ERASE1:
@@ -1217,6 +1252,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 init_completion(&info->dev_ready);
 do {
 info->state = STATE_PREPARED;
+
 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
 column, page_addr);
 if (!exec_cmd) {
@@ -1236,22 +1272,30 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 break;
 }

+ /* Only a few commands need several steps */
+ if (command != NAND_CMD_PAGEPROG &&
+ command != NAND_CMD_READ0 &&
+ command != NAND_CMD_READOOB)
+ break;
+
+ info->cur_chunk++;
+
 /* Check if the sequence is complete */
- if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
 break;

 /*
 * After a split program command sequence has issued
 * the command dispatch, the command sequence is complete.
 */
- if (info->data_size == 0 &&
+ if (info->cur_chunk == (info->ntotalchunks + 1) &&
 command == NAND_CMD_PAGEPROG &&
 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
 break;

 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
 /* Last read: issue a 'last naked read' */
- if (info->data_size == info->chunk_size)
+ if (info->cur_chunk == info->ntotalchunks - 1)
 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
 else
 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1261,7 +1305,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 * the command dispatch must be issued to complete.
*/ } else if (command == NAND_CMD_PAGEPROG && - info->data_size == 0) { + info->cur_chunk == info->ntotalchunks) { ext_cmd_type = EXT_CMD_TYPE_DISPATCH; } } while (1); @@ -1506,6 +1550,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, int strength, int ecc_stepsize, int page_size) { if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { + info->nfullchunks = 1; + info->ntotalchunks = 1; info->chunk_size = 2048; info->spare_size = 40; info->ecc_size = 24; @@ -1514,6 +1560,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, ecc->strength = 1; } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) { + info->nfullchunks = 1; + info->ntotalchunks = 1; info->chunk_size = 512; info->spare_size = 8; info->ecc_size = 8; @@ -1527,6 +1575,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, */ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) { info->ecc_bch = 1; + info->nfullchunks = 1; + info->ntotalchunks = 1; info->chunk_size = 2048; info->spare_size = 32; info->ecc_size = 32; @@ -1537,6 +1587,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { info->ecc_bch = 1; + info->nfullchunks = 2; + info->ntotalchunks = 2; info->chunk_size = 2048; info->spare_size = 32; info->ecc_size = 32; @@ -1551,8 +1603,12 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info, */ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) { info->ecc_bch = 1; + info->nfullchunks = 4; + info->ntotalchunks = 5; info->chunk_size = 1024; info->spare_size = 0; + info->last_chunk_size = 0; + info->last_spare_size = 64; info->ecc_size = 32; ecc->mode = NAND_ECC_HW; ecc->size = info->chunk_size; @@ -1738,7 +1794,7 @@ static int alloc_nand_resource(struct platform_device *pdev) if (ret < 0) return ret; - if (use_dma) { + if (!np && use_dma) { r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (r == NULL) { dev_err(&pdev->dev, @@ -1747,15 +1803,6 @@ static int alloc_nand_resource(struct platform_device *pdev) goto fail_disable_clk; } info->drcmr_dat = r->start; - - r = platform_get_resource(pdev, IORESOURCE_DMA, 1); - if (r == NULL) { - dev_err(&pdev->dev, - "no resource defined for cmd DMA\n"); - ret = -ENXIO; - goto fail_disable_clk; - } - info->drcmr_cmd = r->start; } irq = platform_get_irq(pdev, 0); diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c new file mode 100644 index 000000000000..f550a57e6eea --- /dev/null +++ b/drivers/mtd/nand/qcom_nandc.c @@ -0,0 +1,2223 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+#include <linux/delay.h>
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_DEV1_ECC_CFG 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+
+/* dummy register offsets, used by write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define ECC_PARITY_SIZE_BYTES_RS 19
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+#define STATUS_BFR_READ 30
+#define SET_RD_MODE_AFTER_STATUS 31
+
+/* NAND_DEVn_CFG1 bits */
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR 0
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD 0
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS 0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
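Note that the *_CFG0, *_CFG1 and ECC_CFG identifiers above are bit positions rather than masks, so register words are composed by shifting field values into place. A minimal sketch of that composition, using the same expression that nandc_param() programs further down for a raw 512-byte ONFI parameter read:

	/* sketch: composing NAND_DEV0_CFG0 from the bit-position defines */
	u32 cfg0 = 0 << CW_PER_PAGE		/* a single codeword */
		 | 512 << UD_SIZE_BYTES		/* 512 bytes of user data */
		 | 5 << NUM_ADDR_CYCLES
		 | 0 << SPARE_SIZE_BYTES;	/* no spare bytes */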
+/* NAND OP_CMDs */ +#define PAGE_READ 0x2 +#define PAGE_READ_WITH_ECC 0x3 +#define PAGE_READ_WITH_ECC_SPARE 0x4 +#define PROGRAM_PAGE 0x6 +#define PAGE_PROGRAM_WITH_ECC 0x7 +#define PROGRAM_PAGE_SPARE 0x9 +#define BLOCK_ERASE 0xa +#define FETCH_ID 0xb +#define RESET_DEVICE 0xd + +/* + * the NAND controller performs reads/writes with ECC in 516 byte chunks. + * the driver calls the chunks 'step' or 'codeword' interchangeably + */ +#define NANDC_STEP_SIZE 512 + +/* + * the largest page size we support is 8K, this will have 16 steps/codewords + * of 512 bytes each + */ +#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE) + +/* we read at most 3 registers per codeword scan */ +#define MAX_REG_RD (3 * MAX_NUM_STEPS) + +/* ECC modes supported by the controller */ +#define ECC_NONE BIT(0) +#define ECC_RS_4BIT BIT(1) +#define ECC_BCH_4BIT BIT(2) +#define ECC_BCH_8BIT BIT(3) + +struct desc_info { + struct list_head node; + + enum dma_data_direction dir; + struct scatterlist sgl; + struct dma_async_tx_descriptor *dma_desc; +}; + +/* + * holds the current register values that we want to write. acts as a contiguous + * chunk of memory which we use to write the controller registers through DMA. + */ +struct nandc_regs { + __le32 cmd; + __le32 addr0; + __le32 addr1; + __le32 chip_sel; + __le32 exec; + + __le32 cfg0; + __le32 cfg1; + __le32 ecc_bch_cfg; + + __le32 clrflashstatus; + __le32 clrreadstatus; + + __le32 cmd1; + __le32 vld; + + __le32 orig_cmd1; + __le32 orig_vld; + + __le32 ecc_buf_cfg; +}; + +/* + * NAND controller data struct + * + * @controller: base controller structure + * @host_list: list containing all the chips attached to the + * controller + * @dev: parent device + * @base: MMIO base + * @base_dma: physical base address of controller registers + * @core_clk: controller clock + * @aon_clk: another controller clock + * + * @chan: dma channel + * @cmd_crci: ADM DMA CRCI for command flow control + * @data_crci: ADM DMA CRCI for data flow control + * @desc_list: DMA descriptor list (list of desc_infos) + * + * @data_buffer: our local DMA buffer for page read/writes, + * used when we can't use the buffer provided + * by upper layers directly + * @buf_size/count/start: markers for chip->read_buf/write_buf functions + * @reg_read_buf: local buffer for reading back registers via DMA + * @reg_read_pos: marker for data read in reg_read_buf + * + * @regs: a contiguous chunk of memory for DMA register + * writes. 
contains the register values to be
+ * written to controller
+ * @cmd1/vld: some fixed controller register values
+ * @ecc_modes: supported ECC modes by the current controller,
+ * initialized via DT match data
+ */
+struct qcom_nand_controller {
+ struct nand_hw_control controller;
+ struct list_head host_list;
+
+ struct device *dev;
+
+ void __iomem *base;
+ dma_addr_t base_dma;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ int buf_size;
+ int buf_count;
+ int buf_start;
+
+ __le32 *reg_read_buf;
+ int reg_read_pos;
+
+ struct nandc_regs *regs;
+
+ u32 cmd1, vld;
+ u32 ecc_modes;
+};
+
+/*
+ * NAND chip structure
+ *
+ * @chip: base NAND chip structure
+ * @node: list node to add itself to host_list in
+ * qcom_nand_controller
+ *
+ * @cs: chip select value for this chip
+ * @cw_size: the number of bytes in a single step/codeword
+ * of a page, consisting of all data, ecc, spare
+ * and reserved bytes
+ * @cw_data: the number of bytes within a codeword protected
+ * by ECC
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
+ * @ecc_bytes_hw: ECC bytes used by controller hardware for this
+ * chip
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @last_command: keeps track of last command on this chip. used
+ * for reading correct status
+ *
+ * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
+ * ecc/non-ecc mode for the current nand flash
+ * device
+ */
+struct qcom_nand_host {
+ struct nand_chip chip;
+ struct list_head node;
+
+ int cs;
+ int cw_size;
+ int cw_data;
+ bool use_ecc;
+ bool bch_enabled;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ u8 status;
+ int last_command;
+
+ u32 cfg0, cfg1;
+ u32 cfg0_raw, cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+};
+
+static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+{
+ return container_of(chip, struct qcom_nand_host, chip);
+}
+
+static inline struct qcom_nand_controller *
+get_qcom_nand_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct qcom_nand_controller,
+ controller);
+}
+
+static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+{
+ return ioread32(nandc->base + offset);
+}
+
+static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ iowrite32(val, nandc->base + offset);
+}
+
+static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+{
+ switch (offset) {
+ case NAND_FLASH_CMD:
+ return &regs->cmd;
+ case NAND_ADDR0:
+ return &regs->addr0;
+ case NAND_ADDR1:
+ return &regs->addr1;
+ case NAND_FLASH_CHIP_SELECT:
+ return &regs->chip_sel;
+ case NAND_EXEC_CMD:
+ return &regs->exec;
+ case NAND_FLASH_STATUS:
+ return &regs->clrflashstatus;
+ case NAND_DEV0_CFG0:
+ return &regs->cfg0;
+ case NAND_DEV0_CFG1:
+ return &regs->cfg1;
+ case NAND_DEV0_ECC_CFG:
+ return &regs->ecc_bch_cfg;
+ case NAND_READ_STATUS:
+ return &regs->clrreadstatus;
+ case NAND_DEV_CMD1:
+ return &regs->cmd1;
+ case NAND_DEV_CMD1_RESTORE:
+ return &regs->orig_cmd1;
+ case NAND_DEV_CMD_VLD:
+ return &regs->vld;
+ case NAND_DEV_CMD_VLD_RESTORE:
+ return &regs->orig_vld;
+ case NAND_EBI2_ECC_BUF_CFG:
+ return &regs->ecc_buf_cfg;
+ default:
+ return NULL;
+ }
+}
+
+static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+
struct nandc_regs *regs = nandc->regs; + __le32 *reg; + + reg = offset_to_nandc_reg(regs, offset); + + if (reg) + *reg = cpu_to_le32(val); +} + +/* helper to configure address register values */ +static void set_address(struct qcom_nand_host *host, u16 column, int page) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + if (chip->options & NAND_BUSWIDTH_16) + column >>= 1; + + nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column); + nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff); +} + +/* + * update_rw_regs: set up read/write register values, these will be + * written to the NAND controller registers via DMA + * + * @num_cw: number of steps for the read/write operation + * @read: read or write operation + */ +static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + u32 cmd, cfg0, cfg1, ecc_bch_cfg; + + if (read) { + if (host->use_ecc) + cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; + else + cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; + } else { + cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; + } + + if (host->use_ecc) { + cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + + cfg1 = host->cfg1; + ecc_bch_cfg = host->ecc_bch_cfg; + } else { + cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + + cfg1 = host->cfg1_raw; + ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; + } + + nandc_set_reg(nandc, NAND_FLASH_CMD, cmd); + nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0); + nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1); + nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg); + nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg); + nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus); + nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); +} + +static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read, + int reg_off, const void *vaddr, int size, + bool flow_control) +{ + struct desc_info *desc; + struct dma_async_tx_descriptor *dma_desc; + struct scatterlist *sgl; + struct dma_slave_config slave_conf; + enum dma_transfer_direction dir_eng; + int ret; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + sgl = &desc->sgl; + + sg_init_one(sgl, vaddr, size); + + if (read) { + dir_eng = DMA_DEV_TO_MEM; + desc->dir = DMA_FROM_DEVICE; + } else { + dir_eng = DMA_MEM_TO_DEV; + desc->dir = DMA_TO_DEVICE; + } + + ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir); + if (ret == 0) { + ret = -ENOMEM; + goto err; + } + + memset(&slave_conf, 0x00, sizeof(slave_conf)); + + slave_conf.device_fc = flow_control; + if (read) { + slave_conf.src_maxburst = 16; + slave_conf.src_addr = nandc->base_dma + reg_off; + slave_conf.slave_id = nandc->data_crci; + } else { + slave_conf.dst_maxburst = 16; + slave_conf.dst_addr = nandc->base_dma + reg_off; + slave_conf.slave_id = nandc->cmd_crci; + } + + ret = dmaengine_slave_config(nandc->chan, &slave_conf); + if (ret) { + dev_err(nandc->dev, "failed to configure dma channel\n"); + goto err; + } + + dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0); + if (!dma_desc) { + dev_err(nandc->dev, "failed to prepare desc\n"); + ret = -EINVAL; + goto err; + } + + desc->dma_desc = dma_desc; + + list_add_tail(&desc->node, &nandc->desc_list); + + return 0; +err: + kfree(desc); + + return ret; +} + +/* + * read_reg_dma: 
prepares a descriptor to read a given number of + * contiguous registers to the reg_read_buf pointer + * + * @first: offset of the first register in the contiguous block + * @num_regs: number of registers to read + */ +static int read_reg_dma(struct qcom_nand_controller *nandc, int first, + int num_regs) +{ + bool flow_control = false; + void *vaddr; + int size; + + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) + flow_control = true; + + size = num_regs * sizeof(u32); + vaddr = nandc->reg_read_buf + nandc->reg_read_pos; + nandc->reg_read_pos += num_regs; + + return prep_dma_desc(nandc, true, first, vaddr, size, flow_control); +} + +/* + * write_reg_dma: prepares a descriptor to write a given number of + * contiguous registers + * + * @first: offset of the first register in the contiguous block + * @num_regs: number of registers to write + */ +static int write_reg_dma(struct qcom_nand_controller *nandc, int first, + int num_regs) +{ + bool flow_control = false; + struct nandc_regs *regs = nandc->regs; + void *vaddr; + int size; + + vaddr = offset_to_nandc_reg(regs, first); + + if (first == NAND_FLASH_CMD) + flow_control = true; + + if (first == NAND_DEV_CMD1_RESTORE) + first = NAND_DEV_CMD1; + + if (first == NAND_DEV_CMD_VLD_RESTORE) + first = NAND_DEV_CMD_VLD; + + size = num_regs * sizeof(u32); + + return prep_dma_desc(nandc, false, first, vaddr, size, flow_control); +} + +/* + * read_data_dma: prepares a DMA descriptor to transfer data from the + * controller's internal buffer to the buffer 'vaddr' + * + * @reg_off: offset within the controller's data buffer + * @vaddr: virtual address of the buffer we want to write to + * @size: DMA transaction size in bytes + */ +static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off, + const u8 *vaddr, int size) +{ + return prep_dma_desc(nandc, true, reg_off, vaddr, size, false); +} + +/* + * write_data_dma: prepares a DMA descriptor to transfer data from + * 'vaddr' to the controller's internal buffer + * + * @reg_off: offset within the controller's data buffer + * @vaddr: virtual address of the buffer we want to read from + * @size: DMA transaction size in bytes + */ +static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off, + const u8 *vaddr, int size) +{ + return prep_dma_desc(nandc, false, reg_off, vaddr, size, false); +} + +/* + * helper to prepare dma descriptors to configure registers needed for reading a + * codeword/step in a page + */ +static void config_cw_read(struct qcom_nand_controller *nandc) +{ + write_reg_dma(nandc, NAND_FLASH_CMD, 3); + write_reg_dma(nandc, NAND_DEV0_CFG0, 3); + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1); + + write_reg_dma(nandc, NAND_EXEC_CMD, 1); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 2); + read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1); +} + +/* + * helpers to prepare dma descriptors used to configure registers needed for + * writing a codeword/step in a page + */ +static void config_cw_write_pre(struct qcom_nand_controller *nandc) +{ + write_reg_dma(nandc, NAND_FLASH_CMD, 3); + write_reg_dma(nandc, NAND_DEV0_CFG0, 3); + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1); +} + +static void config_cw_write_post(struct qcom_nand_controller *nandc) +{ + write_reg_dma(nandc, NAND_EXEC_CMD, 1); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 1); + + write_reg_dma(nandc, NAND_FLASH_STATUS, 1); + write_reg_dma(nandc, NAND_READ_STATUS, 1); +} + +/* + * the following functions are used within chip->cmdfunc() to perform different + * NAND_CMD_* commands + */ + +/* sets up 
descriptors for NAND_CMD_PARAM */ +static int nandc_param(struct qcom_nand_host *host) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + /* + * NAND_CMD_PARAM is called before we know much about the FLASH chip + * in use. we configure the controller to perform a raw read of 512 + * bytes to read onfi params + */ + nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_ADDR0, 0); + nandc_set_reg(nandc, NAND_ADDR1, 0); + nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE + | 512 << UD_SIZE_BYTES + | 5 << NUM_ADDR_CYCLES + | 0 << SPARE_SIZE_BYTES); + nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES + | 0 << CS_ACTIVE_BSY + | 17 << BAD_BLOCK_BYTE_NUM + | 1 << BAD_BLOCK_IN_SPARE_AREA + | 2 << WR_RD_BSY_GAP + | 0 << WIDE_FLASH + | 1 << DEV0_CFG1_ECC_DISABLE); + nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); + + /* configure CMD1 and VLD for ONFI param probing */ + nandc_set_reg(nandc, NAND_DEV_CMD_VLD, + (nandc->vld & ~(1 << READ_START_VLD)) + | 0 << READ_START_VLD); + nandc_set_reg(nandc, NAND_DEV_CMD1, + (nandc->cmd1 & ~(0xFF << READ_ADDR)) + | NAND_CMD_PARAM << READ_ADDR); + + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + + nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); + nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1); + write_reg_dma(nandc, NAND_DEV_CMD1, 1); + + nandc->buf_count = 512; + memset(nandc->data_buffer, 0xff, nandc->buf_count); + + config_cw_read(nandc); + + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, + nandc->buf_count); + + /* restore CMD1 and VLD regs */ + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1); + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1); + + return 0; +} + +/* sets up descriptors for NAND_CMD_ERASE1 */ +static int erase_block(struct qcom_nand_host *host, int page_addr) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + nandc_set_reg(nandc, NAND_FLASH_CMD, + BLOCK_ERASE | PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_ADDR0, page_addr); + nandc_set_reg(nandc, NAND_ADDR1, 0); + nandc_set_reg(nandc, NAND_DEV0_CFG0, + host->cfg0_raw & ~(7 << CW_PER_PAGE)); + nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus); + nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus); + + write_reg_dma(nandc, NAND_FLASH_CMD, 3); + write_reg_dma(nandc, NAND_DEV0_CFG0, 2); + write_reg_dma(nandc, NAND_EXEC_CMD, 1); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 1); + + write_reg_dma(nandc, NAND_FLASH_STATUS, 1); + write_reg_dma(nandc, NAND_READ_STATUS, 1); + + return 0; +} + +/* sets up descriptors for NAND_CMD_READID */ +static int read_id(struct qcom_nand_host *host, int column) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + if (column == -1) + return 0; + + nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); + nandc_set_reg(nandc, NAND_ADDR0, column); + nandc_set_reg(nandc, NAND_ADDR1, 0); + nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + + write_reg_dma(nandc, NAND_FLASH_CMD, 4); + write_reg_dma(nandc, NAND_EXEC_CMD, 1); + + read_reg_dma(nandc, NAND_READ_ID, 1); + + return 0; +} + +/* sets up descriptors for NAND_CMD_RESET */ +static int reset(struct 
qcom_nand_host *host) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); + nandc_set_reg(nandc, NAND_EXEC_CMD, 1); + + write_reg_dma(nandc, NAND_FLASH_CMD, 1); + write_reg_dma(nandc, NAND_EXEC_CMD, 1); + + read_reg_dma(nandc, NAND_FLASH_STATUS, 1); + + return 0; +} + +/* helpers to submit/free our list of dma descriptors */ +static int submit_descs(struct qcom_nand_controller *nandc) +{ + struct desc_info *desc; + dma_cookie_t cookie = 0; + + list_for_each_entry(desc, &nandc->desc_list, node) + cookie = dmaengine_submit(desc->dma_desc); + + if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE) + return -ETIMEDOUT; + + return 0; +} + +static void free_descs(struct qcom_nand_controller *nandc) +{ + struct desc_info *desc, *n; + + list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { + list_del(&desc->node); + dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir); + kfree(desc); + } +} + +/* reset the register read buffer for next NAND operation */ +static void clear_read_regs(struct qcom_nand_controller *nandc) +{ + nandc->reg_read_pos = 0; + memset(nandc->reg_read_buf, 0, + MAX_REG_RD * sizeof(*nandc->reg_read_buf)); +} + +static void pre_command(struct qcom_nand_host *host, int command) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + nandc->buf_count = 0; + nandc->buf_start = 0; + host->use_ecc = false; + host->last_command = command; + + clear_read_regs(nandc); +} + +/* + * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our + * privately maintained status byte, this status byte can be read after + * NAND_CMD_STATUS is called + */ +static void parse_erase_write_errors(struct qcom_nand_host *host, int command) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int num_cw; + int i; + + num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1; + + for (i = 0; i < num_cw; i++) { + u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]); + + if (flash_status & FS_MPU_ERR) + host->status &= ~NAND_STATUS_WP; + + if (flash_status & FS_OP_ERR || (i == (num_cw - 1) && + (flash_status & + FS_DEVICE_STS_ERR))) + host->status |= NAND_STATUS_FAIL; + } +} + +static void post_command(struct qcom_nand_host *host, int command) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + switch (command) { + case NAND_CMD_READID: + memcpy(nandc->data_buffer, nandc->reg_read_buf, + nandc->buf_count); + break; + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + parse_erase_write_errors(host, command); + break; + default: + break; + } +} + +/* + * Implements chip->cmdfunc. It's only used for a limited set of commands. + * The rest of the commands wouldn't be called by upper layers. For example, + * NAND_CMD_READOOB would never be called because we have our own versions + * of read_oob ops for nand_ecc_ctrl. 
+ */
+static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ bool wait = false;
+ int ret = 0;
+
+ pre_command(host, command);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ ret = reset(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_READID:
+ nandc->buf_count = 4;
+ ret = read_id(host, column);
+ wait = true;
+ break;
+
+ case NAND_CMD_PARAM:
+ ret = nandc_param(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_ERASE1:
+ ret = erase_block(host, page_addr);
+ wait = true;
+ break;
+
+ case NAND_CMD_READ0:
+ /* we read the entire page for now */
+ WARN_ON(column != 0);
+
+ host->use_ecc = true;
+ set_address(host, 0, page_addr);
+ update_rw_regs(host, ecc->steps, true);
+ break;
+
+ case NAND_CMD_SEQIN:
+ WARN_ON(column != 0);
+ set_address(host, 0, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_NONE:
+ default:
+ break;
+ }
+
+ if (ret) {
+ dev_err(nandc->dev, "failure executing command %d\n",
+ command);
+ free_descs(nandc);
+ return;
+ }
+
+ if (wait) {
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev,
+ "failure submitting descs for command %d\n",
+ command);
+ }
+
+ free_descs(nandc);
+
+ post_command(host, command);
+}
+
+/*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+ *
+ * when using RS ECC, the HW reports the same errors when reading an erased CW,
+ * but it notifies that it is an erased CW by placing special characters at
+ * certain offsets in the buffer.
+ *
+ * verify if the page is erased or not, and fix up the page for RS ECC by
+ * replacing the special characters with 0xff.
+ */
+static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
+{
+ u8 empty1, empty2;
+
+ /*
+ * an erased page flags an error in NAND_FLASH_STATUS, check if the page
+ * is erased by looking for 0x54s at offsets 3 and 175 from the
+ * beginning of each codeword
+ */
+
+ empty1 = data_buf[3];
+ empty2 = data_buf[175];
+
+ /*
+ * if the erased codeword markers exist, override them with
+ * 0xffs
+ */
+ if ((empty1 == 0x54 && empty2 == 0xff) ||
+ (empty1 == 0xff && empty2 == 0x54)) {
+ data_buf[3] = 0xff;
+ data_buf[175] = 0xff;
+ }
+
+ /*
+ * check if the entire chunk contains 0xffs or not. if it doesn't, then
+ * restore the original values at the special offsets
+ */
+ if (memchr_inv(data_buf, 0xff, data_len)) {
+ data_buf[3] = empty1;
+ data_buf[175] = empty2;
+
+ return false;
+ }
+
+ return true;
+}
+
+struct read_stats {
+ __le32 flash;
+ __le32 buffer;
+ __le32 erased_cw;
+};
+
+/*
+ * reads back status registers set by the controller to notify page read
+ * errors. this is equivalent to what 'ecc->correct()' would do.
+ */ +static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf, + u8 *oob_buf) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + unsigned int max_bitflips = 0; + struct read_stats *buf; + int i; + + buf = (struct read_stats *)nandc->reg_read_buf; + + for (i = 0; i < ecc->steps; i++, buf++) { + u32 flash, buffer, erased_cw; + int data_len, oob_len; + + if (i == (ecc->steps - 1)) { + data_len = ecc->size - ((ecc->steps - 1) << 2); + oob_len = ecc->steps << 2; + } else { + data_len = host->cw_data; + oob_len = 0; + } + + flash = le32_to_cpu(buf->flash); + buffer = le32_to_cpu(buf->buffer); + erased_cw = le32_to_cpu(buf->erased_cw); + + if (flash & (FS_OP_ERR | FS_MPU_ERR)) { + bool erased; + + /* ignore erased codeword errors */ + if (host->bch_enabled) { + erased = (erased_cw & ERASED_CW) == ERASED_CW ? + true : false; + } else { + erased = erased_chunk_check_and_fixup(data_buf, + data_len); + } + + if (erased) { + data_buf += data_len; + if (oob_buf) + oob_buf += oob_len + ecc->bytes; + continue; + } + + if (buffer & BS_UNCORRECTABLE_BIT) { + int ret, ecclen, extraooblen; + void *eccbuf; + + eccbuf = oob_buf ? oob_buf + oob_len : NULL; + ecclen = oob_buf ? host->ecc_bytes_hw : 0; + extraooblen = oob_buf ? oob_len : 0; + + /* + * make sure it isn't an erased page reported + * as not-erased by HW because of a few bitflips + */ + ret = nand_check_erased_ecc_chunk(data_buf, + data_len, eccbuf, ecclen, oob_buf, + extraooblen, ecc->strength); + if (ret < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += ret; + max_bitflips = + max_t(unsigned int, max_bitflips, ret); + } + } + } else { + unsigned int stat; + + stat = buffer & BS_CORRECTABLE_ERR_MSK; + mtd->ecc_stats.corrected += stat; + max_bitflips = max(max_bitflips, stat); + } + + data_buf += data_len; + if (oob_buf) + oob_buf += oob_len + ecc->bytes; + } + + return max_bitflips; +} + +/* + * helper to perform the actual page read operation, used by ecc->read_page(), + * ecc->read_oob() + */ +static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf, + u8 *oob_buf) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, ret; + + /* queue cmd descs for each codeword */ + for (i = 0; i < ecc->steps; i++) { + int data_size, oob_size; + + if (i == (ecc->steps - 1)) { + data_size = ecc->size - ((ecc->steps - 1) << 2); + oob_size = (ecc->steps << 2) + host->ecc_bytes_hw + + host->spare_bytes; + } else { + data_size = host->cw_data; + oob_size = host->ecc_bytes_hw + host->spare_bytes; + } + + config_cw_read(nandc); + + if (data_buf) + read_data_dma(nandc, FLASH_BUF_ACC, data_buf, + data_size); + + /* + * when ecc is enabled, the controller doesn't read the real + * or dummy bad block markers in each chunk. 
To maintain a + * consistent layout across RAW and ECC reads, we just + * leave the real/dummy BBM offsets empty (i.e, filled with + * 0xffs) + */ + if (oob_buf) { + int j; + + for (j = 0; j < host->bbm_size; j++) + *oob_buf++ = 0xff; + + read_data_dma(nandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size); + } + + if (data_buf) + data_buf += data_size; + if (oob_buf) + oob_buf += oob_size; + } + + ret = submit_descs(nandc); + if (ret) + dev_err(nandc->dev, "failure to read page/oob\n"); + + free_descs(nandc); + + return ret; +} + +/* + * a helper that copies the last step/codeword of a page (containing free oob) + * into our local buffer + */ +static int copy_last_cw(struct qcom_nand_host *host, int page) +{ + struct nand_chip *chip = &host->chip; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int size; + int ret; + + clear_read_regs(nandc); + + size = host->use_ecc ? host->cw_data : host->cw_size; + + /* prepare a clean read buffer */ + memset(nandc->data_buffer, 0xff, size); + + set_address(host, host->cw_size * (ecc->steps - 1), page); + update_rw_regs(host, 1, true); + + config_cw_read(nandc); + + read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size); + + ret = submit_descs(nandc); + if (ret) + dev_err(nandc->dev, "failed to copy last codeword\n"); + + free_descs(nandc); + + return ret; +} + +/* implements ecc->read_page() */ +static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) +{ + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + u8 *data_buf, *oob_buf = NULL; + int ret; + + data_buf = buf; + oob_buf = oob_required ? chip->oob_poi : NULL; + + ret = read_page_ecc(host, data_buf, oob_buf); + if (ret) { + dev_err(nandc->dev, "failure to read page\n"); + return ret; + } + + return parse_read_errors(host, data_buf, oob_buf); +} + +/* implements ecc->read_page_raw() */ +static int qcom_nandc_read_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, + int oob_required, int page) +{ + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + u8 *data_buf, *oob_buf; + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, ret; + + data_buf = buf; + oob_buf = chip->oob_poi; + + host->use_ecc = false; + update_rw_regs(host, ecc->steps, true); + + for (i = 0; i < ecc->steps; i++) { + int data_size1, data_size2, oob_size1, oob_size2; + int reg_off = FLASH_BUF_ACC; + + data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1); + oob_size1 = host->bbm_size; + + if (i == (ecc->steps - 1)) { + data_size2 = ecc->size - data_size1 - + ((ecc->steps - 1) << 2); + oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw + + host->spare_bytes; + } else { + data_size2 = host->cw_data - data_size1; + oob_size2 = host->ecc_bytes_hw + host->spare_bytes; + } + + config_cw_read(nandc); + + read_data_dma(nandc, reg_off, data_buf, data_size1); + reg_off += data_size1; + data_buf += data_size1; + + read_data_dma(nandc, reg_off, oob_buf, oob_size1); + reg_off += oob_size1; + oob_buf += oob_size1; + + read_data_dma(nandc, reg_off, data_buf, data_size2); + reg_off += data_size2; + data_buf += data_size2; + + read_data_dma(nandc, reg_off, oob_buf, oob_size2); + oob_buf += oob_size2; + } + + ret = submit_descs(nandc); + if (ret) + dev_err(nandc->dev, "failure to read raw page\n"); + + free_descs(nandc); + + return 0; +} 
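The per-codeword split used in the read/write loops above gives the last codeword four fewer data bytes per step, which become the free OOB area. A self-contained check of that arithmetic, assuming a 2K page with 4-bit ECC (ecc->size = 512, ecc->steps = 4, host->cw_data = 516, values taken from the layout description further down):

	#include <assert.h>

	int main(void)
	{
		int ecc_size = 512, steps = 4, cw_data = 516;
		int last_data = ecc_size - ((steps - 1) << 2);	/* 500 bytes */
		int free_oob = steps << 2;			/* 16 OOB bytes */

		/* the first steps - 1 codewords carry cw_data bytes each;
		 * together with the last codeword they cover the 2048-byte page */
		assert((steps - 1) * cw_data + last_data == 2048);
		assert(free_oob == 16);
		return 0;
	}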
+
+/* implements ecc->read_oob() */
+static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ host->use_ecc = true;
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true);
+
+ ret = read_page_ecc(host, NULL, chip->oob_poi);
+ if (ret)
+ dev_err(nandc->dev, "failure to read oob\n");
+
+ return ret;
+}
+
+/* implements ecc->write_page() */
+static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ clear_read_regs(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = true;
+ update_rw_regs(host, ecc->steps, false);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = ecc->bytes;
+ }
+
+ config_cw_write_pre(nandc);
+
+ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
+
+ /*
+ * when ECC is enabled, we don't really need to write anything
+ * to oob for the first n - 1 codewords since these oob regions
+ * just contain ECC bytes that are written by the controller
+ * itself. For the last codeword, we skip the bbm positions and
+ * write to the free oob area.
+ */ + if (i == (ecc->steps - 1)) { + oob_buf += host->bbm_size; + + write_data_dma(nandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size); + } + + config_cw_write_post(nandc); + + data_buf += data_size; + oob_buf += oob_size; + } + + ret = submit_descs(nandc); + if (ret) + dev_err(nandc->dev, "failure to write page\n"); + + free_descs(nandc); + + return ret; +} + +/* implements ecc->write_page_raw() */ +static int qcom_nandc_write_page_raw(struct mtd_info *mtd, + struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page) +{ + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + u8 *data_buf, *oob_buf; + int i, ret; + + clear_read_regs(nandc); + + data_buf = (u8 *)buf; + oob_buf = chip->oob_poi; + + host->use_ecc = false; + update_rw_regs(host, ecc->steps, false); + + for (i = 0; i < ecc->steps; i++) { + int data_size1, data_size2, oob_size1, oob_size2; + int reg_off = FLASH_BUF_ACC; + + data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1); + oob_size1 = host->bbm_size; + + if (i == (ecc->steps - 1)) { + data_size2 = ecc->size - data_size1 - + ((ecc->steps - 1) << 2); + oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw + + host->spare_bytes; + } else { + data_size2 = host->cw_data - data_size1; + oob_size2 = host->ecc_bytes_hw + host->spare_bytes; + } + + config_cw_write_pre(nandc); + + write_data_dma(nandc, reg_off, data_buf, data_size1); + reg_off += data_size1; + data_buf += data_size1; + + write_data_dma(nandc, reg_off, oob_buf, oob_size1); + reg_off += oob_size1; + oob_buf += oob_size1; + + write_data_dma(nandc, reg_off, data_buf, data_size2); + reg_off += data_size2; + data_buf += data_size2; + + write_data_dma(nandc, reg_off, oob_buf, oob_size2); + oob_buf += oob_size2; + + config_cw_write_post(nandc); + } + + ret = submit_descs(nandc); + if (ret) + dev_err(nandc->dev, "failure to write raw page\n"); + + free_descs(nandc); + + return ret; +} + +/* + * implements ecc->write_oob() + * + * the NAND controller cannot write only data or only oob within a codeword, + * since ecc is calculated for the combined codeword. we first copy the + * entire contents for the last codeword(data + oob), replace the old oob + * with the new one in chip->oob_poi, and then write the entire codeword. + * this read-copy-write operation results in a slight performance loss. 
+ */ +static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + u8 *oob = chip->oob_poi; + int free_boff; + int data_size, oob_size; + int ret, status = 0; + + host->use_ecc = true; + + ret = copy_last_cw(host, page); + if (ret) + return ret; + + clear_read_regs(nandc); + + /* calculate the data and oob size for the last codeword/step */ + data_size = ecc->size - ((ecc->steps - 1) << 2); + oob_size = ecc->steps << 2; + + free_boff = ecc->layout->oobfree[0].offset; + + /* override new oob content to last codeword */ + memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size); + + set_address(host, host->cw_size * (ecc->steps - 1), page); + update_rw_regs(host, 1, false); + + config_cw_write_pre(nandc); + write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, + data_size + oob_size); + config_cw_write_post(nandc); + + ret = submit_descs(nandc); + + free_descs(nandc); + + if (ret) { + dev_err(nandc->dev, "failure to write oob\n"); + return -EIO; + } + + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + + status = chip->waitfunc(mtd, chip); + + return status & NAND_STATUS_FAIL ? -EIO : 0; +} + +static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int page, ret, bbpos, bad = 0; + u32 flash_status; + + page = (int)(ofs >> chip->page_shift) & chip->pagemask; + + /* + * configure registers for a raw sub page read, the address is set to + * the beginning of the last codeword, we don't care about reading ecc + * portion of oob. we just want the first few bytes from this codeword + * that contains the BBM + */ + host->use_ecc = false; + + ret = copy_last_cw(host, page); + if (ret) + goto err; + + flash_status = le32_to_cpu(nandc->reg_read_buf[0]); + + if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) { + dev_warn(nandc->dev, "error when trying to read BBM\n"); + goto err; + } + + bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1); + + bad = nandc->data_buffer[bbpos] != 0xff; + + if (chip->options & NAND_BUSWIDTH_16) + bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff); +err: + return bad; +} + +static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int page, ret, status = 0; + + clear_read_regs(nandc); + + /* + * to mark the BBM as bad, we flash the entire last codeword with 0s. 
+ * we don't care about the rest of the content in the codeword since + * we aren't going to use this block again + */ + memset(nandc->data_buffer, 0x00, host->cw_size); + + page = (int)(ofs >> chip->page_shift) & chip->pagemask; + + /* prepare write */ + host->use_ecc = false; + set_address(host, host->cw_size * (ecc->steps - 1), page); + update_rw_regs(host, 1, false); + + config_cw_write_pre(nandc); + write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size); + config_cw_write_post(nandc); + + ret = submit_descs(nandc); + + free_descs(nandc); + + if (ret) { + dev_err(nandc->dev, "failure to update BBM\n"); + return -EIO; + } + + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + + status = chip->waitfunc(mtd, chip); + + return status & NAND_STATUS_FAIL ? -EIO : 0; +} + +/* + * the three functions below implement chip->read_byte(), chip->read_buf() + * and chip->write_buf() respectively. these aren't used for + * reading/writing page data, they are used for smaller data like reading + * id, status etc + */ +static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_host *host = to_qcom_nand_host(chip); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + u8 *buf = nandc->data_buffer; + u8 ret = 0x0; + + if (host->last_command == NAND_CMD_STATUS) { + ret = host->status; + + host->status = NAND_STATUS_READY | NAND_STATUS_WP; + + return ret; + } + + if (nandc->buf_start < nandc->buf_count) + ret = buf[nandc->buf_start++]; + + return ret; +} + +static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start); + + memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len); + nandc->buf_start += real_len; +} + +static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf, + int len) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start); + + memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len); + + nandc->buf_start += real_len; +} + +/* we support only one external chip for now */ +static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); + + if (chipnr <= 0) + return; + + dev_warn(nandc->dev, "invalid chip select\n"); +} + +/* + * NAND controller page layout info + * + * Layout with ECC enabled: + * + * |----------------------| |---------------------------------| + * | xx.......yy| | *********xx.......yy| + * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy| + * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy| + * | xx.......yy| | *********xx.......yy| + * |----------------------| |---------------------------------| + * codeword 1,2..n-1 codeword n + * <---(528/532 Bytes)--> <-------(528/532 Bytes)---------> + * + * n = Number of codewords in the page + * . = ECC bytes + * * = Spare/free bytes + * x = Unused byte(s) + * y = Reserved byte(s) + * + * 2K page: n = 4, spare = 16 bytes + * 4K page: n = 8, spare = 32 bytes + * 8K page: n = 16, spare = 64 bytes + * + * the qcom nand controller operates at a sub page/codeword level. each + * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively. 
+ * the number of ECC bytes varies based on the ECC strength and the bus width.
+ *
+ * the first n - 1 codewords contain 516 bytes of user data, the remaining
+ * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
+ * both user data and spare (oobavail) bytes that sum up to 516 bytes.
+ *
+ * When we access a page with ECC enabled, the reserved byte(s) are not
+ * accessible at all. When reading, we fill up these unreadable positions
+ * with 0xff. When writing, the controller skips writing the inaccessible
+ * bytes.
+ *
+ * Layout with ECC disabled:
+ *
+ * |------------------------------|  |---------------------------------------|
+ * |         yy          xx.......|  |         bb          *********xx.......|
+ * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
+ * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
+ * |         yy          xx.......|  |         bb          *********xx.......|
+ * |------------------------------|  |---------------------------------------|
+ *         codeword 1,2..n-1                        codeword n
+ *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Dummy Bad Block byte(s)
+ * b = Real Bad Block byte(s)
+ * size1/size2 = function of codeword size and 'n'
+ *
+ * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
+ * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
+ * Block Markers. In the last codeword, this position contains the real BBM.
+ *
+ * In order to have a consistent layout between RAW and ECC modes, we assume
+ * the following OOB layout arrangement:
+ *
+ * |-----------|  |--------------------|
+ * |yyxx.......|  |bb*********xx.......|
+ * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
+ * |yyxx.......|  |bb*********xx.......|
+ * |yyxx.......|  |bb*********xx.......|
+ * |-----------|  |--------------------|
+ *  first n - 1       nth OOB region
+ *  OOB regions
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = FREE OOB bytes
+ * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
+ * x = Unused byte(s)
+ * b = Real bad block byte(s) (inaccessible when ECC enabled)
+ *
+ * This layout is read as-is when ECC is disabled. When ECC is enabled, the
+ * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
+ * and assumed to be 0xff when we read a page/oob. The ECC, unused and
+ * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout
+ * (i.e., ecc->bytes is the sum of the three).
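+ *
+ * As a worked example (4 bit RS ECC on an 8 bit bus, as set up in
+ * qcom_nand_host_setup() below): a 2K page is split into n = 4 codewords
+ * of 528 bytes each, with ecc->bytes = 10 (ECC) + 1 (spare) + 1 (BBM) = 12
+ * bytes of non-data content per codeword. Each of the first 3 codewords
+ * carries 516 bytes of user data; the 4th carries the remaining
+ * 2048 - 3 * 516 = 500 data bytes plus the 16 free OOB bytes, again
+ * summing to 516.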
+ */
+
+static struct nand_ecclayout *
+qcom_nand_create_layout(struct qcom_nand_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	struct nand_ecclayout *layout;
+	int i, j, steps, pos = 0, shift = 0;
+
+	layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
+	if (!layout)
+		return NULL;
+
+	steps = mtd->writesize / ecc->size;
+	layout->eccbytes = steps * ecc->bytes;
+
+	layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
+	layout->oobfree[0].length = steps << 2;
+
+	/*
+	 * the oob bytes in the first n - 1 codewords are all grouped together
+	 * in the format:
+	 * DUMMY_BBM + UNUSED + ECC
+	 */
+	for (i = 0; i < steps - 1; i++) {
+		for (j = 0; j < ecc->bytes; j++)
+			layout->eccpos[pos++] = i * ecc->bytes + j;
+	}
+
+	/*
+	 * the oob bytes in the last codeword are grouped in the format:
+	 * BBM + FREE OOB + UNUSED + ECC
+	 */
+
+	/* fill up the bbm positions */
+	for (j = 0; j < host->bbm_size; j++)
+		layout->eccpos[pos++] = i * ecc->bytes + j;
+
+	/*
+	 * fill up the ecc and reserved positions, their indices are offset
+	 * by the free oob region
+	 */
+	shift = layout->oobfree[0].length + host->bbm_size;
+
+	for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
+		layout->eccpos[pos++] = i * ecc->bytes + shift + j;
+
+	return layout;
+}
+
+static int qcom_nand_host_setup(struct qcom_nand_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	int cwperpage, bad_block_byte;
+	bool wide_bus;
+	int ecc_mode = 1;
+
+	/*
+	 * the controller requires that each step consist of 512 bytes of data.
+	 * bail out if DT has populated a wrong step size.
+	 */
+	if (ecc->size != NANDC_STEP_SIZE) {
+		dev_err(nandc->dev, "invalid ecc size\n");
+		return -EINVAL;
+	}
+
+	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+
+	if (ecc->strength >= 8) {
+		/* 8 bit ECC defaults to BCH ECC on all platforms */
+		host->bch_enabled = true;
+		ecc_mode = 1;
+
+		if (wide_bus) {
+			host->ecc_bytes_hw = 14;
+			host->spare_bytes = 0;
+			host->bbm_size = 2;
+		} else {
+			host->ecc_bytes_hw = 13;
+			host->spare_bytes = 2;
+			host->bbm_size = 1;
+		}
+	} else {
+		/*
+		 * if the controller supports BCH for 4 bit ECC, the controller
+		 * uses fewer bytes for ECC. If RS is used, the ECC always
+		 * takes 10 bytes
+		 */
+		if (nandc->ecc_modes & ECC_BCH_4BIT) {
+			/* BCH */
+			host->bch_enabled = true;
+			ecc_mode = 0;
+
+			if (wide_bus) {
+				host->ecc_bytes_hw = 8;
+				host->spare_bytes = 2;
+				host->bbm_size = 2;
+			} else {
+				host->ecc_bytes_hw = 7;
+				host->spare_bytes = 4;
+				host->bbm_size = 1;
+			}
+		} else {
+			/* RS */
+			host->ecc_bytes_hw = 10;
+
+			if (wide_bus) {
+				host->spare_bytes = 0;
+				host->bbm_size = 2;
+			} else {
+				host->spare_bytes = 1;
+				host->bbm_size = 1;
+			}
+		}
+	}
+
+	/*
+	 * we consider ecc->bytes as the sum of all the non-data content in a
+	 * step. It gives us a clean representation of the oob area (even if
+	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
+	 * ECC and 12 bytes for 4 bit ECC
+	 */
+	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
+
+	ecc->read_page		= qcom_nandc_read_page;
+	ecc->read_page_raw	= qcom_nandc_read_page_raw;
+	ecc->read_oob		= qcom_nandc_read_oob;
+	ecc->write_page		= qcom_nandc_write_page;
+	ecc->write_page_raw	= qcom_nandc_write_page_raw;
+	ecc->write_oob		= qcom_nandc_write_oob;
+
+	ecc->mode = NAND_ECC_HW;
+
+	ecc->layout = qcom_nand_create_layout(host);
+	if (!ecc->layout)
+		return -ENOMEM;
+
+	cwperpage = mtd->writesize / ecc->size;
+
+	/*
+	 * DATA_UD_BYTES varies based on whether the read/write command protects
+	 * spare data with ECC too. We protect spare data by default, so we set
+	 * it to main + spare data, which are 512 and 4 bytes respectively.
+	 */
+	host->cw_data = 516;
+
+	/*
+	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
+	 * for 8 bit ECC
+	 */
+	host->cw_size = host->cw_data + ecc->bytes;
+
+	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
+		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
+		return -EINVAL;
+	}
+
+	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
+
+	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+				| host->cw_data << UD_SIZE_BYTES
+				| 0 << DISABLE_STATUS_AFTER_WRITE
+				| 5 << NUM_ADDR_CYCLES
+				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+				| 0 << STATUS_BFR_READ
+				| 1 << SET_RD_MODE_AFTER_STATUS
+				| host->spare_bytes << SPARE_SIZE_BYTES;
+
+	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+				| 0 << CS_ACTIVE_BSY
+				| bad_block_byte << BAD_BLOCK_BYTE_NUM
+				| 0 << BAD_BLOCK_IN_SPARE_AREA
+				| 2 << WR_RD_BSY_GAP
+				| wide_bus << WIDE_FLASH
+				| host->bch_enabled << ENABLE_BCH_ECC;
+
+	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+				| host->cw_size << UD_SIZE_BYTES
+				| 5 << NUM_ADDR_CYCLES
+				| 0 << SPARE_SIZE_BYTES;
+
+	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+				| 0 << CS_ACTIVE_BSY
+				| 17 << BAD_BLOCK_BYTE_NUM
+				| 1 << BAD_BLOCK_IN_SPARE_AREA
+				| 2 << WR_RD_BSY_GAP
+				| wide_bus << WIDE_FLASH
+				| 1 << DEV0_CFG1_ECC_DISABLE;
+
+	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
+				| 0 << ECC_SW_RESET
+				| host->cw_data << ECC_NUM_DATA_BYTES
+				| 1 << ECC_FORCE_CLK_OPEN
+				| ecc_mode << ECC_MODE
+				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+
+	host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+
+	host->clrflashstatus = FS_READY_BSY_N;
+	host->clrreadstatus = 0xc0;
+
+	dev_dbg(nandc->dev,
+		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
+		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
+		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
+		cwperpage);
+
+	return 0;
+}
+
+static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+	int ret;
+
+	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(nandc->dev, "failed to set DMA mask\n");
+		return ret;
+	}
+
+	/*
+	 * we use the internal buffer for reading ONFI params, reading small
+	 * data like ID and status, and performing read-copy-write operations
+	 * when writing to a codeword partially.
532 is the maximum possible + * size of a codeword for our nand controller + */ + nandc->buf_size = 532; + + nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, + GFP_KERNEL); + if (!nandc->data_buffer) + return -ENOMEM; + + nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), + GFP_KERNEL); + if (!nandc->regs) + return -ENOMEM; + + nandc->reg_read_buf = devm_kzalloc(nandc->dev, + MAX_REG_RD * sizeof(*nandc->reg_read_buf), + GFP_KERNEL); + if (!nandc->reg_read_buf) + return -ENOMEM; + + nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx"); + if (!nandc->chan) { + dev_err(nandc->dev, "failed to request slave channel\n"); + return -ENODEV; + } + + INIT_LIST_HEAD(&nandc->desc_list); + INIT_LIST_HEAD(&nandc->host_list); + + spin_lock_init(&nandc->controller.lock); + init_waitqueue_head(&nandc->controller.wq); + + return 0; +} + +static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc) +{ + dma_release_channel(nandc->chan); +} + +/* one time setup of a few nand controller registers */ +static int qcom_nandc_setup(struct qcom_nand_controller *nandc) +{ + /* kill onenand */ + nandc_write(nandc, SFLASHC_BURST_CFG, 0); + + /* enable ADM DMA */ + nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); + + /* save the original values of these registers */ + nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1); + nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD); + + return 0; +} + +static int qcom_nand_host_init(struct qcom_nand_controller *nandc, + struct qcom_nand_host *host, + struct device_node *dn) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + struct device *dev = nandc->dev; + int ret; + + ret = of_property_read_u32(dn, "reg", &host->cs); + if (ret) { + dev_err(dev, "can't get chip-select\n"); + return -ENXIO; + } + + nand_set_flash_node(chip, dn); + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; + + chip->cmdfunc = qcom_nandc_command; + chip->select_chip = qcom_nandc_select_chip; + chip->read_byte = qcom_nandc_read_byte; + chip->read_buf = qcom_nandc_read_buf; + chip->write_buf = qcom_nandc_write_buf; + + /* + * the bad block marker is readable only when we read the last codeword + * of a page with ECC disabled. currently, the nand_base and nand_bbt + * helpers don't allow us to read BB from a nand chip with ECC + * disabled (MTD_OPS_PLACE_OOB is set by default). 
use the block_bad + * and block_markbad helpers until we permanently switch to using + * MTD_OPS_RAW for all drivers (with the help of badblockbits) + */ + chip->block_bad = qcom_nandc_block_bad; + chip->block_markbad = qcom_nandc_block_markbad; + + chip->controller = &nandc->controller; + chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER | + NAND_SKIP_BBTSCAN; + + /* set up initial status value */ + host->status = NAND_STATUS_READY | NAND_STATUS_WP; + + ret = nand_scan_ident(mtd, 1, NULL); + if (ret) + return ret; + + ret = qcom_nand_host_setup(host); + if (ret) + return ret; + + ret = nand_scan_tail(mtd); + if (ret) + return ret; + + return mtd_device_register(mtd, NULL, 0); +} + +/* parse custom DT properties here */ +static int qcom_nandc_parse_dt(struct platform_device *pdev) +{ + struct qcom_nand_controller *nandc = platform_get_drvdata(pdev); + struct device_node *np = nandc->dev->of_node; + int ret; + + ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci); + if (ret) { + dev_err(nandc->dev, "command CRCI unspecified\n"); + return ret; + } + + ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci); + if (ret) { + dev_err(nandc->dev, "data CRCI unspecified\n"); + return ret; + } + + return 0; +} + +static int qcom_nandc_probe(struct platform_device *pdev) +{ + struct qcom_nand_controller *nandc; + struct qcom_nand_host *host; + const void *dev_data; + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node, *child; + struct resource *res; + int ret; + + nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL); + if (!nandc) + return -ENOMEM; + + platform_set_drvdata(pdev, nandc); + nandc->dev = dev; + + dev_data = of_device_get_match_data(dev); + if (!dev_data) { + dev_err(&pdev->dev, "failed to get device data\n"); + return -ENODEV; + } + + nandc->ecc_modes = (unsigned long)dev_data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nandc->base = devm_ioremap_resource(dev, res); + if (IS_ERR(nandc->base)) + return PTR_ERR(nandc->base); + + nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start); + + nandc->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(nandc->core_clk)) + return PTR_ERR(nandc->core_clk); + + nandc->aon_clk = devm_clk_get(dev, "aon"); + if (IS_ERR(nandc->aon_clk)) + return PTR_ERR(nandc->aon_clk); + + ret = qcom_nandc_parse_dt(pdev); + if (ret) + return ret; + + ret = qcom_nandc_alloc(nandc); + if (ret) + return ret; + + ret = clk_prepare_enable(nandc->core_clk); + if (ret) + goto err_core_clk; + + ret = clk_prepare_enable(nandc->aon_clk); + if (ret) + goto err_aon_clk; + + ret = qcom_nandc_setup(nandc); + if (ret) + goto err_setup; + + for_each_available_child_of_node(dn, child) { + if (of_device_is_compatible(child, "qcom,nandcs")) { + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) { + of_node_put(child); + ret = -ENOMEM; + goto err_cs_init; + } + + ret = qcom_nand_host_init(nandc, host, child); + if (ret) { + devm_kfree(dev, host); + continue; + } + + list_add_tail(&host->node, &nandc->host_list); + } + } + + if (list_empty(&nandc->host_list)) { + ret = -ENODEV; + goto err_cs_init; + } + + return 0; + +err_cs_init: + list_for_each_entry(host, &nandc->host_list, node) + nand_release(nand_to_mtd(&host->chip)); +err_setup: + clk_disable_unprepare(nandc->aon_clk); +err_aon_clk: + clk_disable_unprepare(nandc->core_clk); +err_core_clk: + qcom_nandc_unalloc(nandc); + + return ret; +} + +static int qcom_nandc_remove(struct platform_device *pdev) +{ + struct 
qcom_nand_controller *nandc = platform_get_drvdata(pdev); + struct qcom_nand_host *host; + + list_for_each_entry(host, &nandc->host_list, node) + nand_release(nand_to_mtd(&host->chip)); + + qcom_nandc_unalloc(nandc); + + clk_disable_unprepare(nandc->aon_clk); + clk_disable_unprepare(nandc->core_clk); + + return 0; +} + +#define EBI2_NANDC_ECC_MODES (ECC_RS_4BIT | ECC_BCH_8BIT) + +/* + * data will hold a struct pointer containing more differences once we support + * more controller variants + */ +static const struct of_device_id qcom_nandc_of_match[] = { + { .compatible = "qcom,ipq806x-nand", + .data = (void *)EBI2_NANDC_ECC_MODES, + }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_nandc_of_match); + +static struct platform_driver qcom_nandc_driver = { + .driver = { + .name = "qcom-nandc", + .of_match_table = qcom_nandc_of_match, + }, + .probe = qcom_nandc_probe, + .remove = qcom_nandc_remove, +}; +module_platform_driver(qcom_nandc_driver); + +MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>"); +MODULE_DESCRIPTION("Qualcomm NAND Controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 01ac74fa3b95..9c9397b54b2c 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -861,9 +861,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, chip->ecc.mode = NAND_ECC_SOFT; #endif - if (set->ecc_layout != NULL) - chip->ecc.layout = set->ecc_layout; - if (set->disable_ecc) chip->ecc.mode = NAND_ECC_NONE; diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 51e10a35fe08..1c03eee44f3d 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -60,6 +60,7 @@ #define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3) #define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) #define NFC_REG_SPARE_AREA 0x00A0 +#define NFC_REG_PAT_ID 0x00A4 #define NFC_RAM0_BASE 0x0400 #define NFC_RAM1_BASE 0x0800 @@ -538,6 +539,174 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); } +/* These seed values have been extracted from Allwinner's BSP */ +static const u16 sunxi_nfc_randomizer_page_seeds[] = { + 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72, + 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436, + 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d, + 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130, + 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56, + 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55, + 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb, + 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17, + 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62, + 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064, + 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126, + 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e, + 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3, + 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b, + 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d, + 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db, +}; + +/* + * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds + * have been generated using + * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what + * the randomizer engine does internally before de/scrambling OOB data. 
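+ * That is, each sunxi_nfc_randomizer_ecc512_seeds[i] is
+ * sunxi_nfc_randomizer_step(sunxi_nfc_randomizer_page_seeds[i], 4111),
+ * with 4111 = (512 * 8) + 15, and likewise for the 1024 byte step size.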
+ * + * Those tables are statically defined to avoid calculating randomizer state + * at runtime. + */ +static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = { + 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64, + 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409, + 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617, + 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d, + 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91, + 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d, + 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab, + 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8, + 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8, + 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b, + 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5, + 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a, + 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891, + 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36, + 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd, + 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0, +}; + +static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = { + 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6, + 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982, + 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9, + 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07, + 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e, + 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2, + 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c, + 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f, + 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc, + 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e, + 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8, + 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68, + 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d, + 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179, + 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601, + 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd, +}; + +static u16 sunxi_nfc_randomizer_step(u16 state, int count) +{ + state &= 0x7fff; + + /* + * This loop is just a simple implementation of a Fibonacci LFSR using + * the x16 + x15 + 1 polynomial. 
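+	 *
+	 * Each iteration shifts the 15-bit state one bit to the right and
+	 * feeds the XOR of the two least significant bits of the old state
+	 * back in at bit 14.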
+	 */
+	while (count--)
+		state = ((state >> 1) |
+			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+	return state;
+}
+
+static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+{
+	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+	int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+	if (ecc) {
+		if (mtd->ecc_step_size == 512)
+			seeds = sunxi_nfc_randomizer_ecc512_seeds;
+		else
+			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+	}
+
+	return seeds[page % mod];
+}
+
+static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
+					int page, bool ecc)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u32 ecc_ctl;
+	u16 state;
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+	       nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+	       nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+{
+	u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+
+	bbm[0] ^= state;
+	bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+					   const uint8_t *buf, int len,
+					   bool ecc, int page)
+{
+	sunxi_nfc_randomizer_config(mtd, page, ecc);
+	sunxi_nfc_randomizer_enable(mtd);
+	sunxi_nfc_write_buf(mtd, buf, len);
+	sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+					  int len, bool ecc, int page)
+{
+	sunxi_nfc_randomizer_config(mtd, page, ecc);
+	sunxi_nfc_randomizer_enable(mtd);
+	sunxi_nfc_read_buf(mtd, buf, len);
+	sunxi_nfc_randomizer_disable(mtd);
+}
+
 static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
@@ -574,18 +743,20 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
 					u8 *data, int data_off,
 					u8 *oob, int oob_off,
-					int *cur_off,
-					unsigned int *max_bitflips)
+					int *cur_off,
+					unsigned int *max_bitflips,
+					bool bbm, int page)
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
 	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	int raw_mode = 0;
 	u32 status;
 	int ret;
 
 	if (*cur_off != data_off)
 		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
 
-	sunxi_nfc_read_buf(mtd, NULL, ecc->size);
+	sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
 
 	if (data_off + ecc->size != oob_off)
 		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
@@ -594,25 +765,54 @@
 	if (ret)
 		return ret;
 
+	sunxi_nfc_randomizer_enable(mtd);
 	writel(NFC_DATA_TRANS
| NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + sunxi_nfc_randomizer_disable(mtd); if (ret) return ret; + *cur_off = oob_off + ecc->bytes + 4; + status = readl(nfc->regs + NFC_REG_ECC_ST); + if (status & NFC_ECC_PAT_FOUND(0)) { + u8 pattern = 0xff; + + if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) + pattern = 0x0; + + memset(data, pattern, ecc->size); + memset(oob, pattern, ecc->bytes + 4); + + return 1; + } + ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0))); memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); - sunxi_nfc_read_buf(mtd, oob, ecc->bytes + 4); + sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page); if (status & NFC_ECC_ERR(0)) { + /* + * Re-read the data with the randomizer disabled to identify + * bitflips in erased pages. + */ + if (nand->options & NAND_NEED_SCRAMBLING) { + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); + nand->read_buf(mtd, data, ecc->size); + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); + nand->read_buf(mtd, oob, ecc->bytes + 4); + } + ret = nand_check_erased_ecc_chunk(data, ecc->size, oob, ecc->bytes + 4, NULL, 0, ecc->strength); + if (ret >= 0) + raw_mode = 1; } else { /* * The engine protects 4 bytes of OOB data per chunk. @@ -620,6 +820,10 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, */ sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)), oob); + + /* De-randomize the Bad Block Marker. */ + if (bbm && nand->options & NAND_NEED_SCRAMBLING) + sunxi_nfc_randomize_bbm(mtd, page, oob); } if (ret < 0) { @@ -629,13 +833,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, *max_bitflips = max_t(unsigned int, *max_bitflips, ret); } - *cur_off = oob_off + ecc->bytes + 4; - - return 0; + return raw_mode; } static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, - u8 *oob, int *cur_off) + u8 *oob, int *cur_off, + bool randomize, int page) { struct nand_chip *nand = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &nand->ecc; @@ -649,7 +852,11 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, nand->cmdfunc(mtd, NAND_CMD_RNDOUT, offset + mtd->writesize, -1); - sunxi_nfc_read_buf(mtd, oob + offset, len); + if (!randomize) + sunxi_nfc_read_buf(mtd, oob + offset, len); + else + sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len, + false, page); *cur_off = mtd->oobsize + mtd->writesize; } @@ -662,7 +869,8 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, const u8 *data, int data_off, const u8 *oob, int oob_off, - int *cur_off) + int *cur_off, bool bbm, + int page) { struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); @@ -672,11 +880,20 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, if (data_off != *cur_off) nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1); - sunxi_nfc_write_buf(mtd, data, ecc->size); + sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); /* Fill OOB data in */ - writel(sunxi_nfc_buf_to_user_data(oob), - nfc->regs + NFC_REG_USER_DATA(0)); + if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) { + u8 user_data[4]; + + memcpy(user_data, oob, 4); + sunxi_nfc_randomize_bbm(mtd, page, user_data); + writel(sunxi_nfc_buf_to_user_data(user_data), + nfc->regs + NFC_REG_USER_DATA(0)); + } else { + writel(sunxi_nfc_buf_to_user_data(oob), + nfc->regs + 
NFC_REG_USER_DATA(0)); + } if (data_off + ecc->size != oob_off) nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1); @@ -685,11 +902,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, if (ret) return ret; + sunxi_nfc_randomizer_enable(mtd); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + sunxi_nfc_randomizer_disable(mtd); if (ret) return ret; @@ -699,7 +918,8 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, } static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, - u8 *oob, int *cur_off) + u8 *oob, int *cur_off, + int page) { struct nand_chip *nand = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &nand->ecc; @@ -713,7 +933,7 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, nand->cmdfunc(mtd, NAND_CMD_RNDIN, offset + mtd->writesize, -1); - sunxi_nfc_write_buf(mtd, oob + offset, len); + sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); *cur_off = mtd->oobsize + mtd->writesize; } @@ -725,6 +945,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc = &chip->ecc; unsigned int max_bitflips = 0; int ret, i, cur_off = 0; + bool raw_mode = false; sunxi_nfc_hw_ecc_enable(mtd); @@ -736,13 +957,17 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, oob_off + mtd->writesize, - &cur_off, &max_bitflips); - if (ret) + &cur_off, &max_bitflips, + !i, page); + if (ret < 0) return ret; + else if (ret) + raw_mode = true; } if (oob_required) - sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off); + sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, + !raw_mode, page); sunxi_nfc_hw_ecc_disable(mtd); @@ -767,13 +992,14 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, oob_off + mtd->writesize, - &cur_off); + &cur_off, !i, page); if (ret) return ret; } - if (oob_required) - sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off); + if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + &cur_off, page); sunxi_nfc_hw_ecc_disable(mtd); @@ -788,6 +1014,7 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc = &chip->ecc; unsigned int max_bitflips = 0; int ret, i, cur_off = 0; + bool raw_mode = false; sunxi_nfc_hw_ecc_enable(mtd); @@ -799,13 +1026,16 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, oob_off, &cur_off, - &max_bitflips); - if (ret) + &max_bitflips, !i, page); + if (ret < 0) return ret; + else if (ret) + raw_mode = true; } if (oob_required) - sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off); + sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, + !raw_mode, page); sunxi_nfc_hw_ecc_disable(mtd); @@ -829,13 +1059,15 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4)); ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, - oob, oob_off, &cur_off); + oob, oob_off, &cur_off, + false, page); if (ret) return ret; } - if (oob_required) - sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off); + if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + &cur_off, page); 
sunxi_nfc_hw_ecc_disable(mtd); @@ -1345,6 +1577,9 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, if (nand->bbt_options & NAND_BBT_USE_FLASH) nand->bbt_options |= NAND_BBT_NO_OOB; + if (nand->options & NAND_NEED_SCRAMBLING) + nand->options |= NAND_NO_SUBPAGE_WRITE; + ret = sunxi_nand_chip_init_timings(chip, np); if (ret) { dev_err(dev, "could not configure chip timings: %d\n", ret); diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 034420f313d5..293feb19b0b1 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -795,8 +795,6 @@ static int vf610_nfc_probe(struct platform_device *pdev) goto error; } - /* propagate ecc.layout to mtd_info */ - mtd->ecclayout = chip->ecc.layout; chip->ecc.read_page = vf610_nfc_read_page; chip->ecc.write_page = vf610_nfc_write_page; diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 43b3392ffee7..af28bb3ae7cf 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -1124,11 +1124,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, (int)len); - if (ops->mode == MTD_OPS_AUTO_OOB) - oobsize = this->ecclayout->oobavail; - else - oobsize = mtd->oobsize; - + oobsize = mtd_oobavail(mtd, ops); oobcolumn = from & (mtd->oobsize - 1); /* Do not allow reads past end of device */ @@ -1229,11 +1225,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, (int)len); - if (ops->mode == MTD_OPS_AUTO_OOB) - oobsize = this->ecclayout->oobavail; - else - oobsize = mtd->oobsize; - + oobsize = mtd_oobavail(mtd, ops); oobcolumn = from & (mtd->oobsize - 1); /* Do not allow reads past end of device */ @@ -1365,7 +1357,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, ops->oobretlen = 0; if (mode == MTD_OPS_AUTO_OOB) - oobsize = this->ecclayout->oobavail; + oobsize = mtd->oobavail; else oobsize = mtd->oobsize; @@ -1885,12 +1877,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, /* Check zero length */ if (!len) return 0; - - if (ops->mode == MTD_OPS_AUTO_OOB) - oobsize = this->ecclayout->oobavail; - else - oobsize = mtd->oobsize; - + oobsize = mtd_oobavail(mtd, ops); oobcolumn = to & (mtd->oobsize - 1); column = to & (mtd->writesize - 1); @@ -2063,7 +2050,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, ops->oobretlen = 0; if (mode == MTD_OPS_AUTO_OOB) - oobsize = this->ecclayout->oobavail; + oobsize = mtd->oobavail; else oobsize = mtd->oobsize; @@ -2599,6 +2586,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) */ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) { + struct onenand_chip *this = mtd->priv; int ret; ret = onenand_block_isbad(mtd, ofs); @@ -2610,7 +2598,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) } onenand_get_device(mtd, FL_WRITING); - ret = mtd_block_markbad(mtd, ofs); + ret = this->block_markbad(mtd, ofs); onenand_release_device(mtd); return ret; } @@ -4049,12 +4037,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) * The number of bytes available for a client to place data into * the out of band area */ - this->ecclayout->oobavail = 0; + mtd->oobavail = 0; for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && this->ecclayout->oobfree[i].length; i++) - this->ecclayout->oobavail += - 
			this->ecclayout->oobfree[i].length;
-	mtd->oobavail = this->ecclayout->oobavail;
+		mtd->oobavail += this->ecclayout->oobfree[i].length;
 
 	mtd->ecclayout = this->ecclayout;
 	mtd->ecc_strength = 1;
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 08d0085f3e93..680188a88130 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -179,7 +179,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
  * by the onenand_release function.
  *
  */
-int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
 {
 	struct onenand_chip *this = mtd->priv;
 	struct bbm_info *bbm = this->bbm;
@@ -247,6 +247,3 @@ int onenand_default_bbt(struct mtd_info *mtd)
 
 	return onenand_scan_bbt(mtd, bbm->badblock_pattern);
 }
-
-EXPORT_SYMBOL(onenand_scan_bbt);
-EXPORT_SYMBOL(onenand_default_bbt);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 0dc927540b3d..d42c98e1f581 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -9,6 +9,7 @@ if MTD_SPI_NOR
 
 config MTD_MT81xx_NOR
 	tristate "Mediatek MT81xx SPI NOR flash controller"
+	depends on HAS_IOMEM
 	help
 	  This enables access to SPI NOR flash, using MT81xx SPI NOR flash
 	  controller. This controller does not support generic SPI BUS, it only
@@ -30,7 +31,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
 
 config SPI_FSL_QUADSPI
 	tristate "Freescale Quad SPI controller"
-	depends on ARCH_MXC || COMPILE_TEST
+	depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
 	depends on HAS_IOMEM
 	help
 	  This enables support for the Quad SPI controller in master mode.
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 54640f1eb3a1..9ab2b51d54b8 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -213,6 +213,7 @@ enum fsl_qspi_devtype {
 	FSL_QUADSPI_IMX6SX,
 	FSL_QUADSPI_IMX7D,
 	FSL_QUADSPI_IMX6UL,
+	FSL_QUADSPI_LS1021A,
 };
 
 struct fsl_qspi_devtype_data {
@@ -258,6 +259,14 @@ static struct fsl_qspi_devtype_data imx6ul_data = {
 			| QUADSPI_QUIRK_4X_INT_CLK,
 };
 
+static struct fsl_qspi_devtype_data ls1021a_data = {
+	.devtype = FSL_QUADSPI_LS1021A,
+	.rxfifo = 128,
+	.txfifo = 64,
+	.ahb_buf_size = 1024,
+	.driver_data = 0,
+};
+
 #define FSL_QSPI_MAX_CHIP	4
 struct fsl_qspi {
 	struct spi_nor nor[FSL_QSPI_MAX_CHIP];
@@ -275,6 +284,7 @@ struct fsl_qspi {
 	u32 clk_rate;
 	unsigned int chip_base_addr; /* We may support two chips. */
 	bool has_second_chip;
+	bool big_endian;
 	struct mutex lock;
 	struct pm_qos_request pm_qos_req;
 };
@@ -300,6 +310,28 @@ static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
 }
 
 /*
+ * R/W functions for big- or little-endian registers:
+ * the qSPI controller's endianness is independent of the CPU core's.
+ * Even when the CPU core is little-endian, the qSPI controller exists in
+ * both big-endian and little-endian versions, so both register layouts
+ * must be handled.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+	if (q->big_endian)
+		iowrite32be(val, addr);
+	else
+		iowrite32(val, addr);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+	if (q->big_endian)
+		return ioread32be(addr);
+	else
+		return ioread32(addr);
+}
+
+/*
  * An IC bug makes us to re-arrange the 32-bit data.
  * The following chips, such as IMX6SLX, have fixed this bug.
*/ @@ -310,14 +342,14 @@ static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) { - writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); } static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) { - writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); } static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) @@ -326,8 +358,8 @@ static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) u32 reg; /* clear interrupt */ - reg = readl(q->iobase + QUADSPI_FR); - writel(reg, q->iobase + QUADSPI_FR); + reg = qspi_readl(q, q->iobase + QUADSPI_FR); + qspi_writel(q, reg, q->iobase + QUADSPI_FR); if (reg & QUADSPI_FR_TFF_MASK) complete(&q->c); @@ -348,7 +380,7 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q) /* Clear all the LUT table */ for (i = 0; i < QUADSPI_LUT_NUM; i++) - writel(0, base + QUADSPI_LUT_BASE + i * 4); + qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4); /* Quad Read */ lut_base = SEQID_QUAD_READ * 4; @@ -364,14 +396,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q) dummy = 8; } - writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), + qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), base + QUADSPI_LUT(lut_base)); - writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo), + qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo), base + QUADSPI_LUT(lut_base + 1)); /* Write enable */ lut_base = SEQID_WREN * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base)); + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN), + base + QUADSPI_LUT(lut_base)); /* Page Program */ lut_base = SEQID_PP * 4; @@ -385,13 +418,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q) addrlen = ADDR32BIT; } - writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), + qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), base + QUADSPI_LUT(lut_base)); - writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1)); + qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0), + base + QUADSPI_LUT(lut_base + 1)); /* Read Status */ lut_base = SEQID_RDSR * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1), + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) | + LUT1(FSL_READ, PAD1, 0x1), base + QUADSPI_LUT(lut_base)); /* Erase a sector */ @@ -400,40 +435,46 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q) cmd = q->nor[0].erase_opcode; addrlen = q->nor_size <= SZ_16M ? 
ADDR24BIT : ADDR32BIT; - writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), + qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen), base + QUADSPI_LUT(lut_base)); /* Erase the whole chip */ lut_base = SEQID_CHIP_ERASE * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), base + QUADSPI_LUT(lut_base)); /* READ ID */ lut_base = SEQID_RDID * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8), + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) | + LUT1(FSL_READ, PAD1, 0x8), base + QUADSPI_LUT(lut_base)); /* Write Register */ lut_base = SEQID_WRSR * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2), + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) | + LUT1(FSL_WRITE, PAD1, 0x2), base + QUADSPI_LUT(lut_base)); /* Read Configuration Register */ lut_base = SEQID_RDCR * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1), + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) | + LUT1(FSL_READ, PAD1, 0x1), base + QUADSPI_LUT(lut_base)); /* Write disable */ lut_base = SEQID_WRDI * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base)); + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI), + base + QUADSPI_LUT(lut_base)); /* Enter 4 Byte Mode (Micron) */ lut_base = SEQID_EN4B * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base)); + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B), + base + QUADSPI_LUT(lut_base)); /* Enter 4 Byte Mode (Spansion) */ lut_base = SEQID_BRWR * 4; - writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base)); + qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), + base + QUADSPI_LUT(lut_base)); fsl_qspi_lock_lut(q); } @@ -488,15 +529,16 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) q->chip_base_addr, addr, len, cmd); /* save the reg */ - reg = readl(base + QUADSPI_MCR); + reg = qspi_readl(q, base + QUADSPI_MCR); - writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR); - writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, + qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, + base + QUADSPI_SFAR); + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT); - writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); + qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); do { - reg2 = readl(base + QUADSPI_SR); + reg2 = qspi_readl(q, base + QUADSPI_SR); if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { udelay(1); dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); @@ -507,21 +549,22 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) /* trigger the LUT now */ seqid = fsl_qspi_get_seqid(q, cmd); - writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR); + qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, + base + QUADSPI_IPCR); /* Wait for the interrupt. 
*/ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) { dev_err(q->dev, "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", - cmd, addr, readl(base + QUADSPI_FR), - readl(base + QUADSPI_SR)); + cmd, addr, qspi_readl(q, base + QUADSPI_FR), + qspi_readl(q, base + QUADSPI_SR)); err = -ETIMEDOUT; } else { err = 0; } /* restore the MCR */ - writel(reg, base + QUADSPI_MCR); + qspi_writel(q, reg, base + QUADSPI_MCR); return err; } @@ -533,7 +576,7 @@ static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf) int i = 0; while (len > 0) { - tmp = readl(q->iobase + QUADSPI_RBDR + i * 4); + tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4); tmp = fsl_qspi_endian_xchg(q, tmp); dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", q->chip_base_addr, tmp); @@ -561,9 +604,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q) { u32 reg; - reg = readl(q->iobase + QUADSPI_MCR); + reg = qspi_readl(q, q->iobase + QUADSPI_MCR); reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; - writel(reg, q->iobase + QUADSPI_MCR); + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); /* * The minimum delay : 1 AHB + 2 SFCK clocks. @@ -572,7 +615,7 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q) udelay(1); reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); - writel(reg, q->iobase + QUADSPI_MCR); + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); } static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, @@ -586,20 +629,20 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, q->chip_base_addr, to, count); /* clear the TX FIFO. */ - tmp = readl(q->iobase + QUADSPI_MCR); - writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); + tmp = qspi_readl(q, q->iobase + QUADSPI_MCR); + qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); /* fill the TX data to the FIFO */ for (j = 0, i = ((count + 3) / 4); j < i; j++) { tmp = fsl_qspi_endian_xchg(q, *txbuf); - writel(tmp, q->iobase + QUADSPI_TBDR); + qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); txbuf++; } /* fill the TXFIFO upto 16 bytes for i.MX7d */ if (needs_fill_txfifo(q)) for (; i < 4; i++) - writel(tmp, q->iobase + QUADSPI_TBDR); + qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); /* Trigger it */ ret = fsl_qspi_runcmd(q, opcode, to, count); @@ -615,10 +658,10 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q) int nor_size = q->nor_size; void __iomem *base = q->iobase; - writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); - writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); - writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); - writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); + qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); + qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); + qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); + qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); } /* @@ -640,24 +683,26 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q) int seqid; /* AHB configuration for access buffer 0/1/2 .*/ - writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); - writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); - writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); + qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); + qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); + qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); /* * Set ADATSZ with the 
maximum AHB buffer size to improve the * read performance. */ - writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8) - << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR); + qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | + ((q->devtype_data->ahb_buf_size / 8) + << QUADSPI_BUF3CR_ADATSZ_SHIFT), + base + QUADSPI_BUF3CR); /* We only use the buffer3 */ - writel(0, base + QUADSPI_BUF0IND); - writel(0, base + QUADSPI_BUF1IND); - writel(0, base + QUADSPI_BUF2IND); + qspi_writel(q, 0, base + QUADSPI_BUF0IND); + qspi_writel(q, 0, base + QUADSPI_BUF1IND); + qspi_writel(q, 0, base + QUADSPI_BUF2IND); /* Set the default lut sequence for AHB Read. */ seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); - writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT, + qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, q->iobase + QUADSPI_BFGENCR); } @@ -713,7 +758,7 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q) return ret; /* Reset the module */ - writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, + qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, base + QUADSPI_MCR); udelay(1); @@ -721,24 +766,24 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q) fsl_qspi_init_lut(q); /* Disable the module */ - writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, + qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, base + QUADSPI_MCR); - reg = readl(base + QUADSPI_SMPR); - writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK + reg = qspi_readl(q, base + QUADSPI_SMPR); + qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK | QUADSPI_SMPR_FSPHS_MASK | QUADSPI_SMPR_HSENA_MASK | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); /* Enable the module */ - writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, + qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, base + QUADSPI_MCR); /* clear all interrupt status */ - writel(0xffffffff, q->iobase + QUADSPI_FR); + qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); /* enable the interrupt */ - writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); + qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); return 0; } @@ -776,6 +821,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = { { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, }, { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, }, { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, }, + { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); @@ -954,6 +1000,7 @@ static int fsl_qspi_probe(struct platform_device *pdev) if (IS_ERR(q->iobase)) return PTR_ERR(q->iobase); + q->big_endian = of_property_read_bool(np, "big-endian"); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI-memory"); if (!devm_request_mem_region(dev, res->start, resource_size(res), @@ -1101,8 +1148,8 @@ static int fsl_qspi_remove(struct platform_device *pdev) } /* disable the hardware */ - writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); - writel(0x0, q->iobase + QUADSPI_RSER); + qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); + qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); mutex_destroy(&q->lock); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index d5f850d035bb..8bed1a4cb79c 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -371,8 +371,8 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, return ret; } -static int __init 
mtk_nor_init(struct mt8173_nor *mt8173_nor, - struct device_node *flash_node) +static int mtk_nor_init(struct mt8173_nor *mt8173_nor, + struct device_node *flash_node) { int ret; struct spi_nor *nor; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index ed0c19c558b5..157841dc3e99 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -61,14 +61,20 @@ struct flash_info { u16 addr_width; u16 flags; -#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */ -#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */ -#define SST_WRITE 0x04 /* use SST byte programming */ -#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */ -#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */ -#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */ -#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */ -#define USE_FSR 0x80 /* use flag status register */ +#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */ +#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */ +#define SST_WRITE BIT(2) /* use SST byte programming */ +#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */ +#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */ +#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */ +#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */ +#define USE_FSR BIT(7) /* use flag status register */ +#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */ +#define SPI_NOR_HAS_TB BIT(9) /* + * Flash SR has Top/Bottom (TB) protect + * bit. Must be used with + * SPI_NOR_HAS_LOCK. + */ }; #define JEDEC_MFR(info) ((info)->id[0]) @@ -434,32 +440,58 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, } else { pow = ((sr & mask) ^ mask) >> shift; *len = mtd->size >> pow; - *ofs = mtd->size - *len; + if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB) + *ofs = 0; + else + *ofs = mtd->size - *len; } } /* - * Return 1 if the entire region is locked, 0 otherwise + * Return 1 if the entire region is locked (if @locked is true) or unlocked (if + * @locked is false); 0 otherwise */ -static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, - u8 sr) +static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr, bool locked) { loff_t lock_offs; uint64_t lock_len; + if (!len) + return 1; + stm_get_locked_range(nor, sr, &lock_offs, &lock_len); - return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); + if (locked) + /* Requested range is a sub-range of locked range */ + return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); + else + /* Requested range does not overlap with locked range */ + return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs); +} + +static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return stm_check_lock_status_sr(nor, ofs, len, sr, true); +} + +static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, + u8 sr) +{ + return stm_check_lock_status_sr(nor, ofs, len, sr, false); } /* * Lock a region of the flash. Compatible with ST Micro and similar flash. - * Supports only the block protection bits BP{0,1,2} in the status register + * Supports the block protection bits BP{0,1,2} in the status register * (SR). 
Does not support these features found in newer SR bitfields: - * - TB: top/bottom protect - only handle TB=0 (top protect) * - SEC: sector/block protect - only handle SEC=0 (block protect) * - CMP: complement protect - only support CMP=0 (range is not complemented) * + * Support for the following is provided conditionally for some flash: + * - TB: top/bottom protect + * * Sample table portion for 8MB flash (Winbond w25q64fw): * * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion @@ -472,6 +504,13 @@ static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 * X | X | 1 | 1 | 1 | 8 MB | ALL + * ------|-------|-------|-------|-------|---------------|------------------- + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2 * * Returns negative on errors, 0 on success. */ @@ -481,20 +520,39 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) int status_old, status_new; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 shift = ffs(mask) - 1, pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; int ret; status_old = read_sr(nor); if (status_old < 0) return status_old; - /* SPI NOR always locks to the end */ - if (ofs + len != mtd->size) { - /* Does combined region extend to end? */ - if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len, - status_old)) - return -EINVAL; - len = mtd->size - ofs; - } + /* If nothing in our range is unlocked, we don't need to do anything */ + if (stm_is_locked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is unlocked, we can't use 'bottom' protection */ + if (!stm_is_locked_sr(nor, 0, ofs, status_old)) + can_be_bottom = false; + + /* If anything above us is unlocked, we can't use 'top' protection */ + if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_top = false; + + if (!can_be_bottom && !can_be_top) + return -EINVAL; + + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should end up locked */ + if (use_top) + lock_len = mtd->size - ofs; + else + lock_len = ofs + len; /* * Need smallest pow such that: @@ -505,7 +563,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) * * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len)) */ - pow = ilog2(mtd->size) - ilog2(len); + pow = ilog2(mtd->size) - ilog2(lock_len); val = mask - (pow << shift); if (val & ~mask) return -EINVAL; @@ -513,10 +571,20 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) if (!(val & mask)) return -EINVAL; - status_new = (status_old & ~mask) | val; + status_new = (status_old & ~mask & ~SR_TB) | val; + + /* Disallow further writes if WP pin is asserted */ + status_new |= SR_SRWD; + + if (!use_top) + status_new |= SR_TB; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; /* Only modify protection if it will not unlock other areas */ - if ((status_new & mask) <= (status_old & mask)) + if ((status_new & mask) < (status_old & mask)) return -EINVAL; write_enable(nor); @@ -537,17 +605,40 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) int status_old, status_new; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; u8 shift = 
ffs(mask) - 1, pow, val; + loff_t lock_len; + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; + bool use_top; int ret; status_old = read_sr(nor); if (status_old < 0) return status_old; - /* Cannot unlock; would unlock larger region than requested */ - if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize, - status_old)) + /* If nothing in our range is locked, we don't need to do anything */ + if (stm_is_unlocked_sr(nor, ofs, len, status_old)) + return 0; + + /* If anything below us is locked, we can't use 'top' protection */ + if (!stm_is_unlocked_sr(nor, 0, ofs, status_old)) + can_be_top = false; + + /* If anything above us is locked, we can't use 'bottom' protection */ + if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), + status_old)) + can_be_bottom = false; + + if (!can_be_bottom && !can_be_top) return -EINVAL; + /* Prefer top, if both are valid */ + use_top = can_be_top; + + /* lock_len: length of region that should remain locked */ + if (use_top) + lock_len = mtd->size - (ofs + len); + else + lock_len = ofs; + /* * Need largest pow such that: * @@ -557,8 +648,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) * * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len)) */ - pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len)); - if (ofs + len == mtd->size) { + pow = ilog2(mtd->size) - order_base_2(lock_len); + if (lock_len == 0) { val = 0; /* fully unlocked */ } else { val = mask - (pow << shift); @@ -567,10 +658,21 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) return -EINVAL; } - status_new = (status_old & ~mask) | val; + status_new = (status_old & ~mask & ~SR_TB) | val; + + /* Don't protect status register if we're fully unlocked */ + if (lock_len == mtd->size) + status_new &= ~SR_SRWD; + + if (!use_top) + status_new |= SR_TB; + + /* Don't bother if they're the same */ + if (status_new == status_old) + return 0; /* Only modify protection if it will not lock other areas */ - if ((status_new & mask) >= (status_old & mask)) + if ((status_new & mask) > (status_old & mask)) return -EINVAL; write_enable(nor); @@ -762,8 +864,8 @@ static const struct flash_info spi_nor_ids[] = { { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, - { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, - { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, @@ -797,6 +899,7 @@ static const struct flash_info spi_nor_ids[] = { { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) }, { "s25fl164k", 
INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) }, { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) }, @@ -860,11 +963,23 @@ static const struct flash_info spi_nor_ids[] = { { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, - { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { + "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, - { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { + "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { + "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, @@ -1100,45 +1215,6 @@ static int spansion_quad_enable(struct spi_nor *nor) return 0; } -static int micron_quad_enable(struct spi_nor *nor) -{ - int ret; - u8 val; - - ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1); - if (ret < 0) { - dev_err(nor->dev, "error %d reading EVCR\n", ret); - return ret; - } - - write_enable(nor); - - /* set EVCR, enable quad I/O */ - nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON; - ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1); - if (ret < 0) { - dev_err(nor->dev, "error while writing EVCR register\n"); - return ret; - } - - ret = spi_nor_wait_till_ready(nor); - if (ret) - return ret; - - /* read EVCR and check it */ - ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1); - if (ret < 0) { - dev_err(nor->dev, "error %d reading EVCR\n", ret); - return ret; - } - if (val & EVCR_QUAD_EN_MICRON) { - dev_err(nor->dev, "Micron EVCR Quad bit not clear\n"); - return -EINVAL; - } - - return 0; -} - static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) { int status; @@ -1152,12 +1228,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) } return status; case SNOR_MFR_MICRON: - status = micron_quad_enable(nor); - if (status) { - dev_err(nor->dev, "Micron quad-read not enabled\n"); - return -EINVAL; - } - return status; + return 0; default: status = spansion_quad_enable(nor); if (status) { @@ -1233,9 +1304,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || JEDEC_MFR(info) == SNOR_MFR_INTEL || - JEDEC_MFR(info) == SNOR_MFR_SST) { + JEDEC_MFR(info) == SNOR_MFR_SST || + info->flags & SPI_NOR_HAS_LOCK) { write_enable(nor); write_sr(nor, 0); + spi_nor_wait_till_ready(nor); } if (!mtd->name) @@ -1249,7 +1322,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) mtd->_read = spi_nor_read; /* NOR protection support for STmicro/Micron chips and similar */ - if (JEDEC_MFR(info) == SNOR_MFR_MICRON) { + if (JEDEC_MFR(info) == SNOR_MFR_MICRON || + info->flags & SPI_NOR_HAS_LOCK) { nor->flash_lock = stm_lock; nor->flash_unlock = stm_unlock; 
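/*
 * Worked example of the BP-bit arithmetic used by stm_lock() above (an
 * editorial sketch with assumed values, not part of the patch): with
 * SR_BP0..SR_BP2 at bits 2..4, mask = 0x1c and shift = 2. On an 8 MiB
 * part, locking the top 2 MiB gives lock_len = 2 MiB, so
 *
 *	pow = ilog2(8 MiB) - ilog2(2 MiB) = 23 - 21 = 2
 *	val = mask - (pow << shift)       = 0x1c - 0x08 = 0x14
 *
 * i.e. BP2..BP0 = 101b, matching the "2 MB | Upper 1/4" row of the table
 * in the stm_lock() comment; stm_get_locked_range() inverts this via
 * pow = ((sr & mask) ^ mask) >> shift and len = size >> pow.
 */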
nor->flash_is_locked = stm_is_locked; @@ -1269,6 +1343,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) if (info->flags & USE_FSR) nor->flags |= SNOR_F_USE_FSR; + if (info->flags & SPI_NOR_HAS_TB) + nor->flags |= SNOR_F_HAS_SR_TB; #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS /* prefer "small sector" erase if possible */ diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c index 31762120eb56..1cb3f7758fb6 100644 --- a/drivers/mtd/tests/oobtest.c +++ b/drivers/mtd/tests/oobtest.c @@ -215,19 +215,19 @@ static int verify_eraseblock(int ebnum) pr_info("ignoring error as within bitflip_limit\n"); } - if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { + if (use_offset != 0 || use_len < mtd->oobavail) { int k; ops.mode = MTD_OPS_AUTO_OOB; ops.len = 0; ops.retlen = 0; - ops.ooblen = mtd->ecclayout->oobavail; + ops.ooblen = mtd->oobavail; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); - if (err || ops.oobretlen != mtd->ecclayout->oobavail) { + if (err || ops.oobretlen != mtd->oobavail) { pr_err("error: readoob failed at %#llx\n", (long long)addr); errcnt += 1; @@ -244,7 +244,7 @@ static int verify_eraseblock(int ebnum) /* verify post-(use_offset + use_len) area for 0xff */ k = use_offset + use_len; bitflips += memffshow(addr, k, readbuf + k, - mtd->ecclayout->oobavail - k); + mtd->oobavail - k); if (bitflips > bitflip_limit) { pr_err("error: verify failed at %#llx\n", @@ -269,8 +269,8 @@ static int verify_eraseblock_in_one_go(int ebnum) struct mtd_oob_ops ops; int err = 0; loff_t addr = (loff_t)ebnum * mtd->erasesize; - size_t len = mtd->ecclayout->oobavail * pgcnt; - size_t oobavail = mtd->ecclayout->oobavail; + size_t len = mtd->oobavail * pgcnt; + size_t oobavail = mtd->oobavail; size_t bitflips; int i; @@ -394,8 +394,8 @@ static int __init mtd_oobtest_init(void) goto out; use_offset = 0; - use_len = mtd->ecclayout->oobavail; - use_len_max = mtd->ecclayout->oobavail; + use_len = mtd->oobavail; + use_len_max = mtd->oobavail; vary_offset = 0; /* First test: write all OOB, read it back and verify */ @@ -460,8 +460,8 @@ static int __init mtd_oobtest_init(void) /* Write all eraseblocks */ use_offset = 0; - use_len = mtd->ecclayout->oobavail; - use_len_max = mtd->ecclayout->oobavail; + use_len = mtd->oobavail; + use_len_max = mtd->oobavail; vary_offset = 1; prandom_seed_state(&rnd_state, 5); @@ -471,8 +471,8 @@ static int __init mtd_oobtest_init(void) /* Check all eraseblocks */ use_offset = 0; - use_len = mtd->ecclayout->oobavail; - use_len_max = mtd->ecclayout->oobavail; + use_len = mtd->oobavail; + use_len_max = mtd->oobavail; vary_offset = 1; prandom_seed_state(&rnd_state, 5); err = verify_all_eraseblocks(); @@ -480,8 +480,8 @@ static int __init mtd_oobtest_init(void) goto out; use_offset = 0; - use_len = mtd->ecclayout->oobavail; - use_len_max = mtd->ecclayout->oobavail; + use_len = mtd->oobavail; + use_len_max = mtd->oobavail; vary_offset = 0; /* Fourth test: try to write off end of device */ @@ -501,7 +501,7 @@ static int __init mtd_oobtest_init(void) ops.retlen = 0; ops.ooblen = 1; ops.oobretlen = 0; - ops.ooboffs = mtd->ecclayout->oobavail; + ops.ooboffs = mtd->oobavail; ops.datbuf = NULL; ops.oobbuf = writebuf; pr_info("attempting to start write past end of OOB\n"); @@ -521,7 +521,7 @@ static int __init mtd_oobtest_init(void) ops.retlen = 0; ops.ooblen = 1; ops.oobretlen = 0; - ops.ooboffs = mtd->ecclayout->oobavail; + ops.ooboffs = mtd->oobavail; ops.datbuf = NULL; 
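/*
 * mtd->oobavail now carries the value the tests used to fetch through
 * mtd->ecclayout->oobavail. A minimal MTD_OPS_AUTO_OOB read of one page's
 * free OOB bytes might look like this (sketch only; buf is a
 * caller-supplied buffer of at least mtd->oobavail bytes, error handling
 * omitted):
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.ooblen = mtd->oobavail,
 *		.oobbuf = buf,
 *	};
 *	err = mtd_read_oob(mtd, addr, &ops);
 */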
ops.oobbuf = readbuf; pr_info("attempting to start read past end of OOB\n"); @@ -543,7 +543,7 @@ static int __init mtd_oobtest_init(void) ops.mode = MTD_OPS_AUTO_OOB; ops.len = 0; ops.retlen = 0; - ops.ooblen = mtd->ecclayout->oobavail + 1; + ops.ooblen = mtd->oobavail + 1; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; @@ -563,7 +563,7 @@ static int __init mtd_oobtest_init(void) ops.mode = MTD_OPS_AUTO_OOB; ops.len = 0; ops.retlen = 0; - ops.ooblen = mtd->ecclayout->oobavail + 1; + ops.ooblen = mtd->oobavail + 1; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; @@ -587,7 +587,7 @@ static int __init mtd_oobtest_init(void) ops.mode = MTD_OPS_AUTO_OOB; ops.len = 0; ops.retlen = 0; - ops.ooblen = mtd->ecclayout->oobavail; + ops.ooblen = mtd->oobavail; ops.oobretlen = 0; ops.ooboffs = 1; ops.datbuf = NULL; @@ -607,7 +607,7 @@ static int __init mtd_oobtest_init(void) ops.mode = MTD_OPS_AUTO_OOB; ops.len = 0; ops.retlen = 0; - ops.ooblen = mtd->ecclayout->oobavail; + ops.ooblen = mtd->oobavail; ops.oobretlen = 0; ops.ooboffs = 1; ops.datbuf = NULL; @@ -638,7 +638,7 @@ static int __init mtd_oobtest_init(void) for (i = 0; i < ebcnt - 1; ++i) { int cnt = 2; int pg; - size_t sz = mtd->ecclayout->oobavail; + size_t sz = mtd->oobavail; if (bbt[i] || bbt[i + 1]) continue; addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize; @@ -673,13 +673,12 @@ static int __init mtd_oobtest_init(void) for (i = 0; i < ebcnt - 1; ++i) { if (bbt[i] || bbt[i + 1]) continue; - prandom_bytes_state(&rnd_state, writebuf, - mtd->ecclayout->oobavail * 2); + prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2); addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize; ops.mode = MTD_OPS_AUTO_OOB; ops.len = 0; ops.retlen = 0; - ops.ooblen = mtd->ecclayout->oobavail * 2; + ops.ooblen = mtd->oobavail * 2; ops.oobretlen = 0; ops.ooboffs = 0; ops.datbuf = NULL; @@ -688,7 +687,7 @@ static int __init mtd_oobtest_init(void) if (err) goto out; if (memcmpshow(addr, readbuf, writebuf, - mtd->ecclayout->oobavail * 2)) { + mtd->oobavail * 2)) { pr_err("error: verify failed at %#llx\n", (long long)addr); errcnt += 1; diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index 2a45ac210b16..989036c681b8 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c @@ -153,3 +153,52 @@ int ubi_check_pattern(const void *buf, uint8_t patt, int size) return 0; return 1; } + +/* Normal UBI messages */ +void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf); + + va_end(args); +} + +/* UBI warning messages */ +void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_warn(UBI_NAME_STR "%d warning: %ps: %pV\n", + ubi->ubi_num, __builtin_return_address(0), &vaf); + + va_end(args); +} + +/* UBI error messages */ +void ubi_err(const struct ubi_device *ubi, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_err(UBI_NAME_STR "%d error: %ps: %pV\n", + ubi->ubi_num, __builtin_return_address(0), &vaf); + va_end(args); +} diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 2974b67f6c6c..dadc6a9d5755 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -49,15 +49,19 @@ /* UBI name used for character devices, sysfs, etc */ #define UBI_NAME_STR "ubi" +struct ubi_device; + /* Normal UBI messages */ -#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \ - ubi->ubi_num, ##__VA_ARGS__) +__printf(2, 3) +void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...); + /* UBI warning messages */ -#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \ - ubi->ubi_num, __func__, ##__VA_ARGS__) +__printf(2, 3) +void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...); + /* UBI error messages */ -#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \ - ubi->ubi_num, __func__, ##__VA_ARGS__) +__printf(2, 3) +void ubi_err(const struct ubi_device *ubi, const char *fmt, ...); /* Background thread name pattern */ #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 802d55457f19..fd90f3737963 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -7,7 +7,7 @@ * (at your option) any later version. */ -#include <asm-generic/io-64-nonatomic-hi-lo.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/of_mdio.h> #include "hns_dsaf_main.h" #include "hns_dsaf_mac.h" diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index ab264e1bccd0..75683fb26734 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -45,7 +45,7 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/pci.h> -#include <asm-generic/io-64-nonatomic-hi-lo.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include "nfp_net_ctrl.h" diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 588803ad6847..6ccba0d862df 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) return 0; } -static int amd_ntb_peer_db_addr(struct ntb_dev *ntb, - phys_addr_t *db_addr, - resource_size_t *db_size) -{ - struct amd_ntb_dev *ndev = ntb_ndev(ntb); - - if (db_addr) - *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET); - if (db_size) - *db_size = sizeof(u32); - - return 0; -} - static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); @@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb, return 0; } -static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, - phys_addr_t *spad_addr) -{ - struct amd_ntb_dev *ndev = ntb_ndev(ntb); - - if (idx < 0 || idx >= ndev->spad_count) - return -EINVAL; - - if (spad_addr) - *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET + - ndev->peer_spad + (idx << 2)); - return 0; -} - static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); @@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = { .db_clear = amd_ntb_db_clear, 
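/*
 * The ubi_msg()/ubi_warn()/ubi_err() helpers earlier in this diff all rely
 * on the kernel's %pV printf extension; the general shape of such a helper
 * (a sketch with hypothetical names, not code from this patch) is:
 *
 *	__printf(2, 3)
 *	void my_dev_notice(const struct my_dev *d, const char *fmt, ...)
 *	{
 *		struct va_format vaf;
 *		va_list args;
 *
 *		va_start(args, fmt);
 *		vaf.fmt = fmt;
 *		vaf.va = &args;
 *		pr_notice("mydev%d: %pV\n", d->id, &vaf);
 *		va_end(args);
 *	}
 */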
.db_set_mask = amd_ntb_db_set_mask, .db_clear_mask = amd_ntb_db_clear_mask, - .peer_db_addr = amd_ntb_peer_db_addr, .peer_db_set = amd_ntb_peer_db_set, .spad_count = amd_ntb_spad_count, .spad_read = amd_ntb_spad_read, .spad_write = amd_ntb_spad_write, - .peer_spad_addr = amd_ntb_peer_spad_addr, .peer_spad_read = amd_ntb_peer_spad_read, .peer_spad_write = amd_ntb_peer_spad_write, }; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index ec4775f0ec16..2ef9d9130864 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -124,6 +124,7 @@ struct ntb_transport_qp { bool client_ready; bool link_is_up; + bool active; u8 qp_num; /* Only 64 QP's are allowed. 0-63 */ u64 qp_bit; @@ -719,6 +720,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) { qp->link_is_up = false; + qp->active = false; qp->tx_index = 0; qp->rx_index = 0; @@ -827,7 +829,7 @@ static void ntb_transport_link_work(struct work_struct *work) struct pci_dev *pdev = ndev->pdev; resource_size_t size; u32 val; - int rc, i, spad; + int rc = 0, i, spad; /* send the local info, in the opposite order of the way we read it */ for (i = 0; i < nt->mw_count; i++) { @@ -897,6 +899,13 @@ static void ntb_transport_link_work(struct work_struct *work) out1: for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); + + /* if there's an actual failure, we should just bail */ + if (rc < 0) { + ntb_link_disable(ndev); + return; + } + out: if (ntb_link_is_up(ndev, NULL, NULL) == 1) schedule_delayed_work(&nt->link_work, @@ -926,11 +935,13 @@ static void ntb_qp_link_work(struct work_struct *work) if (val & BIT(qp->qp_num)) { dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); qp->link_is_up = true; + qp->active = true; if (qp->event_handler) qp->event_handler(qp->cb_data, qp->link_is_up); - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); } else if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); @@ -1411,7 +1422,8 @@ static void ntb_transport_rxc_db(unsigned long data) if (i == qp->rx_max_entry) { /* there is more work to do */ - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { /* the doorbell bit is set: clear it */ ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); @@ -1422,7 +1434,8 @@ static void ntb_transport_rxc_db(unsigned long data) * ntb_process_rxc and clearing the doorbell bit: * there might be some more work to do. 
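 * Rescheduling is additionally gated on qp->active: the flag is cleared
 * in ntb_transport_free_queue() before tasklet_kill(), so a queue being
 * torn down cannot re-arm its own receive tasklet.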
*/ - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); } } @@ -1760,6 +1773,8 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) pdev = qp->ndev->pdev; + qp->active = false; + if (qp->tx_dma_chan) { struct dma_chan *chan = qp->tx_dma_chan; /* Putting the dma_chan to NULL will force any new traffic to be @@ -1793,7 +1808,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) qp_bit = BIT_ULL(qp->qp_num); ntb_db_set_mask(qp->ndev, qp_bit); - tasklet_disable(&qp->rxc_db_work); + tasklet_kill(&qp->rxc_db_work); cancel_delayed_work_sync(&qp->link_work); @@ -1886,7 +1901,8 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); return 0; } @@ -2069,7 +2085,8 @@ static void ntb_transport_doorbell_callback(void *data, int vector) qp_num = __ffs(db_bits); qp = &nt->qp_vec[qp_num]; - tasklet_schedule(&qp->rxc_db_work); + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); db_bits &= ~BIT_ULL(qp_num); } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index c8a37ba4b4f9..8dfce9c9aad0 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -178,7 +178,7 @@ static void perf_copy_callback(void *data) atomic_dec(&pctx->dma_sync); } -static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst, +static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst, char *src, size_t size) { struct perf_ctx *perf = pctx->perf; @@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst, dma_cookie_t cookie; size_t src_off, dst_off; struct perf_mw *mw = &perf->mw; - u64 vbase, dst_vaddr; + void __iomem *vbase; + void __iomem *dst_vaddr; dma_addr_t dst_phys; int retries = 0; @@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst, } device = chan->device; - src_off = (size_t)src & ~PAGE_MASK; - dst_off = (size_t)dst & ~PAGE_MASK; + src_off = (uintptr_t)src & ~PAGE_MASK; + dst_off = (uintptr_t __force)dst & ~PAGE_MASK; if (!is_dma_copy_aligned(device, src_off, dst_off, size)) return -ENODEV; - vbase = (u64)(u64 *)mw->vbase; - dst_vaddr = (u64)(u64 *)dst; + vbase = mw->vbase; + dst_vaddr = dst; dst_phys = mw->phys_addr + (dst_vaddr - vbase); unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); @@ -261,13 +262,13 @@ err_get_unmap: return 0; } -static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src, +static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src, u64 buf_size, u64 win_size, u64 total) { int chunks, total_chunks, i; int copied_chunks = 0; u64 copied = 0, result; - char *tmp = dst; + char __iomem *tmp = dst; u64 perf, diff_us; ktime_t kstart, kstop, kdiff; @@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data) struct perf_ctx *perf = pctx->perf; struct pci_dev *pdev = perf->ntb->pdev; struct perf_mw *mw = &perf->mw; - char *dst; + char __iomem *dst; u64 win_size, buf_size, total; void *src; int rc, node, i; @@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data) if (buf_size > MAX_TEST_SIZE) buf_size = MAX_TEST_SIZE; - dst = (char *)mw->vbase; + dst = (char __iomem *)mw->vbase; atomic_inc(&perf->tsync); while (atomic_read(&perf->tsync) != perf->perf_threads) @@ -424,6 +425,7 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) { struct perf_mw *mw = &perf->mw; size_t xlat_size, buf_size; + int rc; if (!size) 
return -EINVAL; @@ -447,6 +449,13 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) mw->buf_size = 0; } + rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size); + if (rc) { + dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n"); + perf_free_mw(perf); + return -EIO; + } + return 0; } @@ -541,6 +550,8 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, return 0; buf = kmalloc(64, GFP_KERNEL); + if (!buf) + return -ENOMEM; out_offset = snprintf(buf, 64, "%d\n", perf->run); ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); kfree(buf); @@ -548,6 +559,21 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, return ret; } +static void threads_cleanup(struct perf_ctx *perf) +{ + struct pthr_ctx *pctx; + int i; + + perf->run = false; + for (i = 0; i < MAX_THREADS; i++) { + pctx = &perf->pthr_ctx[i]; + if (pctx->thread) { + kthread_stop(pctx->thread); + pctx->thread = NULL; + } + } +} + static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *offp) { @@ -563,17 +589,9 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, if (atomic_read(&perf->tsync) == 0) perf->run = false; - if (perf->run) { - /* lets stop the threads */ - perf->run = false; - for (i = 0; i < MAX_THREADS; i++) { - if (perf->pthr_ctx[i].thread) { - kthread_stop(perf->pthr_ctx[i].thread); - perf->pthr_ctx[i].thread = NULL; - } else - break; - } - } else { + if (perf->run) + threads_cleanup(perf); + else { perf->run = true; if (perf->perf_threads > MAX_THREADS) { @@ -604,17 +622,11 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, kthread_create_on_node(ntb_perf_thread, (void *)pctx, node, "ntb_perf %d", i); - if (pctx->thread) + if (IS_ERR(pctx->thread)) { + pctx->thread = NULL; + goto err; + } else wake_up_process(pctx->thread); - else { - perf->run = false; - for (i = 0; i < MAX_THREADS; i++) { - if (pctx->thread) { - kthread_stop(pctx->thread); - pctx->thread = NULL; - } - } - } if (perf->run == false) return -ENXIO; @@ -623,6 +635,10 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, } return count; + +err: + threads_cleanup(perf); + return -ENXIO; } static const struct file_operations ntb_perf_debugfs_run = { diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index ca5721c306bb..cc31c6f1f88e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -99,7 +99,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, if (unlikely(bad_pmem)) rc = -EIO; else { - memcpy_from_pmem(mem + off, pmem_addr, len); + rc = memcpy_from_pmem(mem + off, pmem_addr, len); flush_dcache_page(page); } } else { @@ -295,7 +295,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns, if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align))) return -EIO; - memcpy_from_pmem(buf, pmem->virt_addr + offset, size); + return memcpy_from_pmem(buf, pmem->virt_addr + offset, size); } else { memcpy_to_pmem(pmem->virt_addr + offset, buf, size); wmb_pmem(); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 42a01a931989..9461dd639acd 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -146,6 +146,14 @@ struct nvme_nvm_command { }; }; +struct nvme_nvm_completion { + __le64 result; /* Used by LightNVM to return ppa completions */ + __le16 sq_head; /* how much of this queue may be reclaimed */ + __le16 sq_id; /* submission queue that generated this 
entry */ + __u16 command_id; /* of the command which completed */ + __le16 status; /* did the command fail, and if so, why? */ +}; + #define NVME_NVM_LP_MLC_PAIRS 886 struct nvme_nvm_lp_mlc { __u16 num_pairs; @@ -507,6 +515,10 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, static void nvme_nvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; + struct nvme_nvm_completion *cqe = rq->special; + + if (cqe) + rqd->ppa_status = le64_to_cpu(cqe->result); nvm_end_io(rqd, error); @@ -526,7 +538,8 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) if (IS_ERR(rq)) return -ENOMEM; - cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL); + cmd = kzalloc(sizeof(struct nvme_nvm_command) + + sizeof(struct nvme_nvm_completion), GFP_KERNEL); if (!cmd) { blk_mq_free_request(rq); return -ENOMEM; @@ -545,7 +558,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) rq->cmd = (unsigned char *)cmd; rq->cmd_len = sizeof(struct nvme_nvm_command); - rq->special = (void *)0; + rq->special = cmd + 1; rq->end_io_data = rqd; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f8db70ae172d..24ccda303efb 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -723,6 +723,13 @@ static void nvme_complete_rq(struct request *req) blk_mq_end_request(req, error); } +/* We read the CQE phase first to check if the rest of the entry is valid */ +static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head, + u16 phase) +{ + return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase; +} + static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) { u16 head, phase; @@ -730,13 +737,10 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) head = nvmeq->cq_head; phase = nvmeq->cq_phase; - for (;;) { + while (nvme_cqe_valid(nvmeq, head, phase)) { struct nvme_completion cqe = nvmeq->cqes[head]; - u16 status = le16_to_cpu(cqe.status); struct request *req; - if ((status & 1) != phase) - break; if (++head == nvmeq->q_depth) { head = 0; phase = !phase; @@ -767,7 +771,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id); if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special) memcpy(req->special, &cqe, sizeof(cqe)); - blk_mq_complete_request(req, status >> 1); + blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1); } @@ -808,18 +812,16 @@ static irqreturn_t nvme_irq(int irq, void *data) static irqreturn_t nvme_irq_check(int irq, void *data) { struct nvme_queue *nvmeq = data; - struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; - if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) - return IRQ_NONE; - return IRQ_WAKE_THREAD; + if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) + return IRQ_WAKE_THREAD; + return IRQ_NONE; } static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) { struct nvme_queue *nvmeq = hctx->driver_data; - if ((le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) == - nvmeq->cq_phase) { + if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) { spin_lock_irq(&nvmeq->q_lock); __nvme_process_cq(nvmeq, &tag); spin_unlock_irq(&nvmeq->q_lock); diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 80994566a1c8..8986382718dd 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -47,6 +47,10 @@ #define RK3368_SOC_CON15_FLASH0 BIT(14) 
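/*
 * Rockchip GRF registers take a write-enable mask in their upper 16 bits:
 * only the bits whose mask bit is set get updated. rk3399_pmu_iodomain_init()
 * below uses this idiom; in general form (sketch with hypothetical names):
 *
 *	u32 val = FIELD_BIT | (FIELD_BIT << 16);	// new value + write-enable
 *	ret = regmap_write(grf, REG_OFFSET, val);
 */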
#define RK3368_SOC_FLASH_SUPPLY_NUM 2 +#define RK3399_PMUGRF_CON0 0x180 +#define RK3399_PMUGRF_CON0_VSEL BIT(8) +#define RK3399_PMUGRF_VSEL_SUPPLY_NUM 9 + struct rockchip_iodomain; /** @@ -181,6 +185,25 @@ static void rk3368_iodomain_init(struct rockchip_iodomain *iod) dev_warn(iod->dev, "couldn't update flash0 ctrl\n"); } +static void rk3399_pmu_iodomain_init(struct rockchip_iodomain *iod) +{ + int ret; + u32 val; + + /* if no pmu io supply we should leave things alone */ + if (!iod->supplies[RK3399_PMUGRF_VSEL_SUPPLY_NUM].reg) + return; + + /* + * set pmu io iodomain to also use this framework + * instead of a special gpio. + */ + val = RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16); + ret = regmap_write(iod->grf, RK3399_PMUGRF_CON0, val); + if (ret < 0) + dev_warn(iod->dev, "couldn't update pmu io iodomain ctrl\n"); +} + /* * On the rk3188 the io-domains are handled by a shared register with the * lower 8 bits being still being continuing drive-strength settings. @@ -252,6 +275,33 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = { }, }; +static const struct rockchip_iodomain_soc_data soc_data_rk3399 = { + .grf_offset = 0xe640, + .supply_names = { + "bt656", /* APIO2_VDD */ + "audio", /* APIO5_VDD */ + "sdmmc", /* SDMMC0_VDD */ + "gpio1830", /* APIO4_VDD */ + }, +}; + +static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = { + .grf_offset = 0x180, + .supply_names = { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + "pmu1830", /* PMUIO2_VDD */ + }, + .init = rk3399_pmu_iodomain_init, +}; + static const struct of_device_id rockchip_iodomain_match[] = { { .compatible = "rockchip,rk3188-io-voltage-domain", @@ -269,6 +319,14 @@ static const struct of_device_id rockchip_iodomain_match[] = { .compatible = "rockchip,rk3368-pmu-io-voltage-domain", .data = (void *)&soc_data_rk3368_pmu }, + { + .compatible = "rockchip,rk3399-io-voltage-domain", + .data = (void *)&soc_data_rk3399 + }, + { + .compatible = "rockchip,rk3399-pmu-io-voltage-domain", + .data = (void *)&soc_data_rk3399_pmu + }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, rockchip_iodomain_match); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 544bd3493852..3e84315c6f12 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -589,7 +589,7 @@ config RTC_DRV_RV3029_HWMON default y help Say Y here if you want to expose temperature sensor data on - rtc-rv3029c2. + rtc-rv3029. config RTC_DRV_RV8803 tristate "Micro Crystal RV8803" diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index d41bbcd653f6..ba0d61934d35 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -49,7 +49,20 @@ #define ABX8XX_REG_CD_TIMER_CTL 0x18 +#define ABX8XX_REG_OSC 0x1c +#define ABX8XX_OSC_FOS BIT(3) +#define ABX8XX_OSC_BOS BIT(4) +#define ABX8XX_OSC_ACAL_512 BIT(5) +#define ABX8XX_OSC_ACAL_1024 BIT(6) + +#define ABX8XX_OSC_OSEL BIT(7) + +#define ABX8XX_REG_OSS 0x1d +#define ABX8XX_OSS_OF BIT(1) +#define ABX8XX_OSS_OMODE BIT(4) + #define ABX8XX_REG_CFG_KEY 0x1f +#define ABX8XX_CFG_KEY_OSC 0xa1 #define ABX8XX_CFG_KEY_MISC 0x9d #define ABX8XX_REG_ID0 0x28 @@ -81,6 +94,20 @@ static struct abx80x_cap abx80x_caps[] = { [ABX80X] = {.pn = 0} }; +static int abx80x_is_rc_mode(struct i2c_client *client) +{ + int flags = 0; + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS); + if (flags < 0) { + dev_err(&client->dev, + "Failed to read autocalibration attribute\n"); + return flags; + } + + return (flags & ABX8XX_OSS_OMODE) ? 
1 : 0; +} + static int abx80x_enable_trickle_charger(struct i2c_client *client, u8 trickle_cfg) { @@ -112,7 +139,23 @@ static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); unsigned char buf[8]; - int err; + int err, flags, rc_mode = 0; + + /* Read the Oscillator Failure only in XT mode */ + rc_mode = abx80x_is_rc_mode(client); + if (rc_mode < 0) + return rc_mode; + + if (!rc_mode) { + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS); + if (flags < 0) + return flags; + + if (flags & ABX8XX_OSS_OF) { + dev_err(dev, "Oscillator failure, data is invalid.\n"); + return -EINVAL; + } + } err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH, sizeof(buf), buf); @@ -140,7 +183,7 @@ static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); unsigned char buf[8]; - int err; + int err, flags; if (tm->tm_year < 100) return -EINVAL; @@ -161,6 +204,18 @@ static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm) return -EIO; } + /* Clear the OF bit of Oscillator Status Register */ + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS); + if (flags < 0) + return flags; + + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSS, + flags & ~ABX8XX_OSS_OF); + if (err < 0) { + dev_err(&client->dev, "Unable to write oscillator status register\n"); + return err; + } + return 0; } @@ -248,6 +303,174 @@ static int abx80x_set_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +static int abx80x_rtc_set_autocalibration(struct device *dev, + int autocalibration) +{ + struct i2c_client *client = to_i2c_client(dev); + int retval, flags = 0; + + if ((autocalibration != 0) && (autocalibration != 1024) && + (autocalibration != 512)) { + dev_err(dev, "autocalibration value outside permitted range\n"); + return -EINVAL; + } + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC); + if (flags < 0) + return flags; + + if (autocalibration == 0) { + flags &= ~(ABX8XX_OSC_ACAL_512 | ABX8XX_OSC_ACAL_1024); + } else if (autocalibration == 1024) { + /* 1024 autocalibration is 0x10 */ + flags |= ABX8XX_OSC_ACAL_1024; + flags &= ~(ABX8XX_OSC_ACAL_512); + } else { + /* 512 autocalibration is 0x11 */ + flags |= (ABX8XX_OSC_ACAL_1024 | ABX8XX_OSC_ACAL_512); + } + + /* Unlock write access to Oscillator Control Register */ + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, + ABX8XX_CFG_KEY_OSC); + if (retval < 0) { + dev_err(dev, "Failed to write CONFIG_KEY register\n"); + return retval; + } + + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags); + + return retval; +} + +static int abx80x_rtc_get_autocalibration(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int flags = 0, autocalibration; + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC); + if (flags < 0) + return flags; + + if (flags & ABX8XX_OSC_ACAL_512) + autocalibration = 512; + else if (flags & ABX8XX_OSC_ACAL_1024) + autocalibration = 1024; + else + autocalibration = 0; + + return autocalibration; +} + +static ssize_t autocalibration_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int retval; + unsigned long autocalibration = 0; + + retval = kstrtoul(buf, 10, &autocalibration); + if (retval < 0) { + dev_err(dev, "Failed to store RTC autocalibration attribute\n"); + return -EINVAL; + } + + retval = abx80x_rtc_set_autocalibration(dev, autocalibration); + + return retval ? 
retval : count; +} + +static ssize_t autocalibration_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int autocalibration = 0; + + autocalibration = abx80x_rtc_get_autocalibration(dev); + if (autocalibration < 0) { + dev_err(dev, "Failed to read RTC autocalibration\n"); + sprintf(buf, "0\n"); + return autocalibration; + } + + return sprintf(buf, "%d\n", autocalibration); +} + +static DEVICE_ATTR_RW(autocalibration); + +static ssize_t oscillator_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + int retval, flags, rc_mode = 0; + + if (strncmp(buf, "rc", 2) == 0) { + rc_mode = 1; + } else if (strncmp(buf, "xtal", 4) == 0) { + rc_mode = 0; + } else { + dev_err(dev, "Oscillator selection value outside permitted ones\n"); + return -EINVAL; + } + + flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC); + if (flags < 0) + return flags; + + if (rc_mode == 0) + flags &= ~(ABX8XX_OSC_OSEL); + else + flags |= (ABX8XX_OSC_OSEL); + + /* Unlock write access on Oscillator Control register */ + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, + ABX8XX_CFG_KEY_OSC); + if (retval < 0) { + dev_err(dev, "Failed to write CONFIG_KEY register\n"); + return retval; + } + + retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags); + if (retval < 0) { + dev_err(dev, "Failed to write Oscillator Control register\n"); + return retval; + } + + return retval ? retval : count; +} + +static ssize_t oscillator_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int rc_mode = 0; + struct i2c_client *client = to_i2c_client(dev); + + rc_mode = abx80x_is_rc_mode(client); + + if (rc_mode < 0) { + dev_err(dev, "Failed to read RTC oscillator selection\n"); + sprintf(buf, "\n"); + return rc_mode; + } + + if (rc_mode) + return sprintf(buf, "rc\n"); + else + return sprintf(buf, "xtal\n"); +} + +static DEVICE_ATTR_RW(oscillator); + +static struct attribute *rtc_calib_attrs[] = { + &dev_attr_autocalibration.attr, + &dev_attr_oscillator.attr, + NULL, +}; + +static const struct attribute_group rtc_calib_attr_group = { + .attrs = rtc_calib_attrs, +}; + static int abx80x_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct i2c_client *client = to_i2c_client(dev); @@ -303,6 +526,13 @@ static int abx80x_dt_trickle_cfg(struct device_node *np) return (trickle_cfg | i); } +static void rtc_calib_remove_sysfs_group(void *_dev) +{ + struct device *dev = _dev; + + sysfs_remove_group(&dev->kobj, &rtc_calib_attr_group); +} + static int abx80x_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -405,6 +635,24 @@ static int abx80x_probe(struct i2c_client *client, } } + /* Export sysfs entries */ + err = sysfs_create_group(&(&client->dev)->kobj, &rtc_calib_attr_group); + if (err) { + dev_err(&client->dev, "Failed to create sysfs group: %d\n", + err); + return err; + } + + err = devm_add_action(&client->dev, rtc_calib_remove_sysfs_group, + &client->dev); + if (err) { + rtc_calib_remove_sysfs_group(&client->dev); + dev_err(&client->dev, + "Failed to add sysfs cleanup action: %d\n", + err); + return err; + } + return 0; } diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 14e08c4c1a01..355fdb97a006 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c @@ -255,7 +255,7 @@ static const struct rtc_class_ops asm9260_rtc_ops = { .alarm_irq_enable = asm9260_alarm_irq_enable, }; -static int __init asm9260_rtc_probe(struct 
platform_device *pdev) +static int asm9260_rtc_probe(struct platform_device *pdev) { struct asm9260_rtc_priv *priv; struct device *dev = &pdev->dev; @@ -323,7 +323,7 @@ err_return: return ret; } -static int __exit asm9260_rtc_remove(struct platform_device *pdev) +static int asm9260_rtc_remove(struct platform_device *pdev) { struct asm9260_rtc_priv *priv = platform_get_drvdata(pdev); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index a82937e2f824..d107a8e72a7d 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -176,7 +176,13 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) bin2bcd(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f); buf[M41T80_REG_MON] = bin2bcd(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f); + /* assume 20YY not 19YY */ + if (tm->tm_year < 100 || tm->tm_year > 199) { + dev_err(&client->dev, "Year must be between 2000 and 2099. It's %d.\n", + tm->tm_year + 1900); + return -EINVAL; + } buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year % 100); if (i2c_transfer(client->adapter, msgs, 1) != 1) { diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index 1c91ce8a6d75..025bb33b9cd2 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -20,6 +20,7 @@ #include <linux/printk.h> #include <linux/spi/spi.h> #include <linux/rtc.h> +#include <linux/of.h> /* MCP795 Instructions, see datasheet table 3-1 */ #define MCP795_EEREAD 0x03 @@ -183,9 +184,18 @@ static int mcp795_probe(struct spi_device *spi) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id mcp795_of_match[] = { + { .compatible = "maxim,mcp795" }, + { } +}; +MODULE_DEVICE_TABLE(of, mcp795_of_match); +#endif + static struct spi_driver mcp795_driver = { .driver = { .name = "rtc-mcp795", + .of_match_table = of_match_ptr(mcp795_of_match), }, .probe = mcp795_probe, }; diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 8d9f35ceb808..f623038e586e 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -61,11 +61,14 @@ static irqreturn_t rv8803_handle_irq(int irq, void *dev_id) struct i2c_client *client = dev_id; struct rv8803_data *rv8803 = i2c_get_clientdata(client); unsigned long events = 0; - int flags; + int flags, try = 0; mutex_lock(&rv8803->flags_lock); - flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + do { + flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + try++; + } while ((flags == -ENXIO) && (try < 3)); if (flags <= 0) { mutex_unlock(&rv8803->flags_lock); return IRQ_NONE; @@ -424,7 +427,7 @@ static int rv8803_probe(struct i2c_client *client, { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct rv8803_data *rv8803; - int err, flags; + int err, flags, try = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) { @@ -441,7 +444,16 @@ static int rv8803_probe(struct i2c_client *client, rv8803->client = client; i2c_set_clientdata(client, rv8803); - flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + /* + * There is a 60µs window where the RTC may not reply on the i2c bus in + * that case, the transfer is not ACKed. In that case, ensure there are + * multiple attempts. 
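 * A transfer that is not ACKed surfaces as -ENXIO from the SMBus helpers,
 * which is why only that error code is retried.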
+ */ + do { + flags = i2c_smbus_read_byte_data(client, RV8803_FLAG); + try++; + } while ((flags == -ENXIO) && (try < 3)); + if (flags < 0) return flags; @@ -476,8 +488,12 @@ static int rv8803_probe(struct i2c_client *client, return PTR_ERR(rv8803->rtc); } - err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT, - RV8803_EXT_WADA); + try = 0; + do { + err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT, + RV8803_EXT_WADA); + try++; + } while ((err == -ENXIO) && (try < 3)); if (err) return err; diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index ffb860d18701..d01ad7e8078e 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -501,18 +501,27 @@ static int s3c_rtc_probe(struct platform_device *pdev) info->rtc_clk = devm_clk_get(&pdev->dev, "rtc"); if (IS_ERR(info->rtc_clk)) { - dev_err(&pdev->dev, "failed to find rtc clock\n"); - return PTR_ERR(info->rtc_clk); + ret = PTR_ERR(info->rtc_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find rtc clock\n"); + else + dev_dbg(&pdev->dev, "probe deferred due to missing rtc clk\n"); + return ret; } clk_prepare_enable(info->rtc_clk); if (info->data->needs_src_clk) { info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); if (IS_ERR(info->rtc_src_clk)) { - dev_err(&pdev->dev, - "failed to find rtc source clock\n"); + ret = PTR_ERR(info->rtc_src_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to find rtc source clock\n"); + else + dev_dbg(&pdev->dev, + "probe deferred due to missing rtc src clk\n"); clk_disable_unprepare(info->rtc_clk); - return PTR_ERR(info->rtc_src_clk); + return ret; } clk_prepare_enable(info->rtc_src_clk); } diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 17ad5749e91d..1e560188dd13 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu, struct alias_pav_group *group; struct dasd_uid uid; + spin_lock(get_ccwdev_lock(device->cdev)); private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type; private->uid.base_unit_addr = lcu->uac->unit[private->uid.real_unit_addr].base_ua; uid = private->uid; - + spin_unlock(get_ccwdev_lock(device->cdev)); /* if we have no PAV anyway, we don't need to bother with PAV groups */ if (lcu->pav == NO_PAV) { list_move(&device->alias_list, &lcu->active_devices); return 0; } - group = _find_group(lcu, &uid); if (!group) { group = kzalloc(sizeof(*group), GFP_ATOMIC); @@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr) return 0; } -/* - * This function tries to lock all devices on an lcu via trylock - * return NULL on success otherwise return first failed device - */ -static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu, - struct dasd_device *pos) - -{ - struct alias_pav_group *pavgroup; - struct dasd_device *device; - - list_for_each_entry(device, &lcu->active_devices, alias_list) { - if (device == pos) - continue; - if (!spin_trylock(get_ccwdev_lock(device->cdev))) - return device; - } - list_for_each_entry(device, &lcu->inactive_devices, alias_list) { - if (device == pos) - continue; - if (!spin_trylock(get_ccwdev_lock(device->cdev))) - return device; - } - list_for_each_entry(pavgroup, &lcu->grouplist, group) { - list_for_each_entry(device, &pavgroup->baselist, alias_list) { - if (device == pos) - continue; - if (!spin_trylock(get_ccwdev_lock(device->cdev))) - return device; - } - list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { 
- if (device == pos) - continue; - if (!spin_trylock(get_ccwdev_lock(device->cdev))) - return device; - } - } - return NULL; -} - -/* - * unlock all devices except the one that is specified as pos - * stop if enddev is specified and reached - */ -static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu, - struct dasd_device *pos, - struct dasd_device *enddev) - -{ - struct alias_pav_group *pavgroup; - struct dasd_device *device; - - list_for_each_entry(device, &lcu->active_devices, alias_list) { - if (device == pos) - continue; - if (device == enddev) - return; - spin_unlock(get_ccwdev_lock(device->cdev)); - } - list_for_each_entry(device, &lcu->inactive_devices, alias_list) { - if (device == pos) - continue; - if (device == enddev) - return; - spin_unlock(get_ccwdev_lock(device->cdev)); - } - list_for_each_entry(pavgroup, &lcu->grouplist, group) { - list_for_each_entry(device, &pavgroup->baselist, alias_list) { - if (device == pos) - continue; - if (device == enddev) - return; - spin_unlock(get_ccwdev_lock(device->cdev)); - } - list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { - if (device == pos) - continue; - if (device == enddev) - return; - spin_unlock(get_ccwdev_lock(device->cdev)); - } - } -} - -/* - * this function is needed because the locking order - * device lock -> lcu lock - * needs to be assured when iterating over devices in an LCU - * - * if a device is specified in pos then the device lock is already hold - */ -static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu, - struct dasd_device *pos, - unsigned long *flags) -{ - struct dasd_device *failed; - - do { - spin_lock_irqsave(&lcu->lock, *flags); - failed = _trylock_all_devices_on_lcu(lcu, pos); - if (failed) { - _unlock_all_devices_on_lcu(lcu, pos, failed); - spin_unlock_irqrestore(&lcu->lock, *flags); - cpu_relax(); - } - } while (failed); -} - -static void _trylock_and_lock_lcu(struct alias_lcu *lcu, - struct dasd_device *pos) -{ - struct dasd_device *failed; - - do { - spin_lock(&lcu->lock); - failed = _trylock_all_devices_on_lcu(lcu, pos); - if (failed) { - _unlock_all_devices_on_lcu(lcu, pos, failed); - spin_unlock(&lcu->lock); - cpu_relax(); - } - } while (failed); -} - static int read_unit_address_configuration(struct dasd_device *device, struct alias_lcu *lcu) { @@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) if (rc) return rc; - _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); + spin_lock_irqsave(&lcu->lock, flags); lcu->pav = NO_PAV; for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { switch (lcu->uac->unit[i].ua_type) { @@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) alias_list) { _add_device_to_lcu(lcu, device, refdev); } - _unlock_all_devices_on_lcu(lcu, NULL, NULL); spin_unlock_irqrestore(&lcu->lock, flags); return 0; } @@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device) lcu = private->lcu; rc = 0; - spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); - spin_lock(&lcu->lock); + spin_lock_irqsave(&lcu->lock, flags); if (!(lcu->flags & UPDATE_PENDING)) { rc = _add_device_to_lcu(lcu, device, device); if (rc) @@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device) list_move(&device->alias_list, &lcu->active_devices); _schedule_lcu_update(lcu, device); } - spin_unlock(&lcu->lock); - spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + spin_unlock_irqrestore(&lcu->lock, flags); return rc; } @@ -933,15 +806,27 @@ static void 
_stop_all_devices_on_lcu(struct alias_lcu *lcu) struct alias_pav_group *pavgroup; struct dasd_device *device; - list_for_each_entry(device, &lcu->active_devices, alias_list) + list_for_each_entry(device, &lcu->active_devices, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_set_stop_bits(device, DASD_STOPPED_SU); - list_for_each_entry(device, &lcu->inactive_devices, alias_list) + spin_unlock(get_ccwdev_lock(device->cdev)); + } + list_for_each_entry(device, &lcu->inactive_devices, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_set_stop_bits(device, DASD_STOPPED_SU); + spin_unlock(get_ccwdev_lock(device->cdev)); + } list_for_each_entry(pavgroup, &lcu->grouplist, group) { - list_for_each_entry(device, &pavgroup->baselist, alias_list) + list_for_each_entry(device, &pavgroup->baselist, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_set_stop_bits(device, DASD_STOPPED_SU); - list_for_each_entry(device, &pavgroup->aliaslist, alias_list) + spin_unlock(get_ccwdev_lock(device->cdev)); + } + list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_set_stop_bits(device, DASD_STOPPED_SU); + spin_unlock(get_ccwdev_lock(device->cdev)); + } } } @@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu) struct alias_pav_group *pavgroup; struct dasd_device *device; - list_for_each_entry(device, &lcu->active_devices, alias_list) + list_for_each_entry(device, &lcu->active_devices, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); - list_for_each_entry(device, &lcu->inactive_devices, alias_list) + spin_unlock(get_ccwdev_lock(device->cdev)); + } + list_for_each_entry(device, &lcu->inactive_devices, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); + spin_unlock(get_ccwdev_lock(device->cdev)); + } list_for_each_entry(pavgroup, &lcu->grouplist, group) { - list_for_each_entry(device, &pavgroup->baselist, alias_list) + list_for_each_entry(device, &pavgroup->baselist, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); - list_for_each_entry(device, &pavgroup->aliaslist, alias_list) + spin_unlock(get_ccwdev_lock(device->cdev)); + } + list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { + spin_lock(get_ccwdev_lock(device->cdev)); dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); + spin_unlock(get_ccwdev_lock(device->cdev)); + } } } @@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); reset_summary_unit_check(lcu, device, suc_data->reason); - _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); + spin_lock_irqsave(&lcu->lock, flags); _unstop_all_devices_on_lcu(lcu); _restart_all_base_devices_on_lcu(lcu); /* 3. 
read new alias configuration */ _schedule_lcu_update(lcu, device); lcu->suc_data.device = NULL; dasd_put_device(device); - _unlock_all_devices_on_lcu(lcu, NULL, NULL); spin_unlock_irqrestore(&lcu->lock, flags); } -/* - * note: this will be called from int handler context (cdev locked) - */ -void dasd_alias_handle_summary_unit_check(struct dasd_device *device, - struct irb *irb) +void dasd_alias_handle_summary_unit_check(struct work_struct *work) { + struct dasd_device *device = container_of(work, struct dasd_device, + suc_work); struct dasd_eckd_private *private = device->private; struct alias_lcu *lcu; - char reason; - char *sense; - - sense = dasd_get_sense(irb); - if (sense) { - reason = sense[8]; - DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", - "eckd handle summary unit check: reason", reason); - } else { - DBF_DEV_EVENT(DBF_WARNING, device, "%s", - "eckd handle summary unit check:" - " no reason code available"); - return; - } + unsigned long flags; lcu = private->lcu; if (!lcu) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "device not ready to handle summary" " unit check (no lcu structure)"); - return; + goto out; } - _trylock_and_lock_lcu(lcu, device); + spin_lock_irqsave(&lcu->lock, flags); /* If this device is about to be removed just return and wait for * the next interrupt on a different device */ @@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device, DBF_DEV_EVENT(DBF_WARNING, device, "%s", "device is in offline processing," " don't do summary unit check handling"); - _unlock_all_devices_on_lcu(lcu, device, NULL); - spin_unlock(&lcu->lock); - return; + goto out_unlock; } if (lcu->suc_data.device) { /* already scheduled or running */ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "previous instance of summary unit check worker" " still pending"); - _unlock_all_devices_on_lcu(lcu, device, NULL); - spin_unlock(&lcu->lock); - return ; + goto out_unlock; } _stop_all_devices_on_lcu(lcu); /* prepare for lcu_update */ - private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; - lcu->suc_data.reason = reason; + lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; + lcu->suc_data.reason = private->suc_reason; lcu->suc_data.device = device; dasd_get_device(device); - _unlock_all_devices_on_lcu(lcu, device, NULL); - spin_unlock(&lcu->lock); if (!schedule_work(&lcu->suc_data.worker)) dasd_put_device(device); +out_unlock: + spin_unlock_irqrestore(&lcu->lock, flags); +out: + clear_bit(DASD_FLAG_SUC, &device->flags); + dasd_put_device(device); }; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 75c032dcf173..c1b4ae55e129 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1682,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device) /* setup work queue for validate server*/ INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); + /* setup work queue for summary unit check */ + INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check); if (!ccw_device_is_pathgroup(device->cdev)) { dev_warn(&device->cdev->dev, @@ -2549,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, device->state == DASD_STATE_ONLINE && !test_bit(DASD_FLAG_OFFLINE, &device->flags) && !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { - /* - * the state change could be caused by an alias - * reassignment remove device from alias handling - * to prevent new requests from being scheduled on - * the wrong alias device - */ - dasd_alias_remove_device(device); - /* schedule 
worker to reload device */ dasd_reload_device(device); } @@ -2571,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, /* summary unit check */ if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { - dasd_alias_handle_summary_unit_check(device, irb); + if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "eckd suc: device already notified"); + return; + } + sense = dasd_get_sense(irb); + if (!sense) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "eckd suc: no reason code available"); + clear_bit(DASD_FLAG_SUC, &device->flags); + return; + + } + private->suc_reason = sense[8]; + DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", + "eckd handle summary unit check: reason", + private->suc_reason); + dasd_get_device(device); + if (!schedule_work(&device->suc_work)) + dasd_put_device(device); + return; } @@ -4495,6 +4509,12 @@ static int dasd_eckd_reload_device(struct dasd_device *device) struct dasd_uid uid; unsigned long flags; + /* + * remove device from alias handling to prevent new requests + * from being scheduled on the wrong alias device + */ + dasd_alias_remove_device(device); + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); old_base = private->uid.base_unit_addr; spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index f8f91ee652d3..6d9a6d3517cd 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -525,6 +525,7 @@ struct dasd_eckd_private { int count; u32 fcx_max_data; + char suc_reason; }; @@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *); int dasd_alias_add_device(struct dasd_device *); int dasd_alias_remove_device(struct dasd_device *); struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); -void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); +void dasd_alias_handle_summary_unit_check(struct work_struct *); void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); void dasd_alias_lcu_setup_complete(struct dasd_device *); void dasd_alias_wait_for_lcu_setup(struct dasd_device *); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 8de29be32a56..0f0add932e7a 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -470,6 +470,7 @@ struct dasd_device { struct work_struct restore_device; struct work_struct reload_device; struct work_struct kick_validate; + struct work_struct suc_work; struct timer_list timer; debug_info_t *debug_area; @@ -542,6 +543,7 @@ struct dasd_attention_data { #define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */ #define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */ #define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */ +#define DASD_FLAG_SUC 14 /* unhandled summary unit check */ #define DASD_SLEEPON_START_TAG ((void *) 1) #define DASD_SLEEPON_END_TAG ((void *) 2) diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 5bcdf8dd6fb0..a404a41e871c 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -332,7 +332,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, { int rel_port = -1, group_id; struct alua_port_group *pg, *old_pg = NULL; - bool pg_updated; + bool pg_updated = false; unsigned long flags; group_id 
= scsi_vpd_tpg_id(sdev, &rel_port); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 266b909fe854..f3032ca5051b 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -958,23 +958,22 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ default: - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", - fnic_fcpio_status_to_str(hdr_status)); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; } - if (hdr_status != FCPIO_SUCCESS) { - atomic64_inc(&fnic_stats->io_stats.io_failures); - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", - fnic_fcpio_status_to_str(hdr_status)); - } /* Break link with the SCSI command */ CMD_SP(sc) = NULL; CMD_FLAGS(sc) |= FNIC_IO_DONE; spin_unlock_irqrestore(io_lock, flags); + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + fnic_fcpio_status_to_str(hdr_status)); + } + fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a544366a367e..f57d02c3b6cf 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba) } vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) + if (vports != NULL) { for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); @@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba) } spin_unlock_irq(shost->host_lock); } - lpfc_destroy_vport_work_array(phba, vports); + } + lpfc_destroy_vport_work_array(phba, vports); lpfc_unblock_mgmt_io(phba); return 0; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 4484e63033a5..fce414a2cd76 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2097,7 +2097,7 @@ struct megasas_instance { u8 UnevenSpanSupport; u8 supportmax256vd; - u8 allow_fw_scan; + u8 pd_list_not_supported; u16 fw_supported_vd_count; u16 fw_supported_pd_count; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5c08568ccfbf..e6ebc7ae2df1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1838,7 +1838,7 @@ static int megasas_slave_configure(struct scsi_device *sdev) struct megasas_instance *instance; instance = megasas_lookup_instance(sdev->host->host_no); - if (instance->allow_fw_scan) { + if (instance->pd_list_not_supported) { if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + @@ -1874,7 +1874,8 @@ static int megasas_slave_alloc(struct scsi_device *sdev) pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; - if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState == + if ((instance->pd_list_not_supported || + instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM)) { goto scan_target; } @@ -4087,7 +4088,13 @@ megasas_get_pd_list(struct megasas_instance *instance) switch (ret) { case DCMD_FAILED: - megaraid_sas_kill_hba(instance); + dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " + "failed/not supported by firmware\n"); + + 
if (instance->ctrl_context) + megaraid_sas_kill_hba(instance); + else + instance->pd_list_not_supported = 1; break; case DCMD_TIMEOUT: @@ -5034,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance) case PCI_DEVICE_ID_DELL_PERC5: default: instance->instancet = &megasas_instance_template_xscale; - instance->allow_fw_scan = 1; break; } @@ -6650,12 +6656,13 @@ out: } for (i = 0; i < ioc->sge_count; i++) { - if (kbuff_arr[i]) + if (kbuff_arr[i]) { dma_free_coherent(&instance->pdev->dev, le32_to_cpu(kern_sge32[i].length), kbuff_arr[i], le32_to_cpu(kern_sge32[i].phys_addr)); kbuff_arr[i] = NULL; + } } megasas_return_cmd(instance, cmd); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 985231900aca..8a44d1541eb4 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1881,15 +1881,17 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, else vha->req->cnt = vha->req->length - (vha->req->ring_index - cnt); - } - if (unlikely(vha->req->cnt < (req_cnt + 2))) { - ql_dbg(ql_dbg_io, vha, 0x305a, - "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", - vha->vp_idx, vha->req->ring_index, - vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length); - return -EAGAIN; + if (unlikely(vha->req->cnt < (req_cnt + 2))) { + ql_dbg(ql_dbg_io, vha, 0x305a, + "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n", + vha->vp_idx, vha->req->ring_index, + vha->req->cnt, req_cnt, cnt, cnt_in, + vha->req->length); + return -EAGAIN; + } } + vha->req->cnt -= req_cnt; return 0; diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index c126966130ab..ce79de822e46 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info) ucp[3] = 0; put_unaligned_be64(info, &ucp[4]); } else if ((buf[0] & 0x7f) == 0x70) { - buf[0] |= 0x80; - put_unaligned_be64(info, &buf[3]); + /* + * Only set the 'VALID' bit if we can represent the value + * correctly; otherwise just fill out the lower bytes and + * clear the 'VALID' flag. 
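+ * + * For illustration (hypothetical values): info = 0x12345678 fits in the + * 4-byte INFORMATION field, so buf[0] becomes 0xf0 (0x70 | 0x80) and + * bytes 3..6 hold 12 34 56 78, while info = 0x123456789 does not fit, + * so buf[0] stays 0x70 (VALID clear) and bytes 3..6 hold the truncated + * low 32 bits 23 45 67 89.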
+ */ + if (info <= 0xffffffffUL) + buf[0] |= 0x80; + else + buf[0] &= 0x7f; + put_unaligned_be32((u32)info, &buf[3]); } return 0; diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h index 6266a5d73d0f..e659912498bd 100644 --- a/drivers/scsi/scsi_sas_internal.h +++ b/drivers/scsi/scsi_sas_internal.h @@ -4,7 +4,7 @@ #define SAS_HOST_ATTRS 0 #define SAS_PHY_ATTRS 17 #define SAS_PORT_ATTRS 1 -#define SAS_RPORT_ATTRS 7 +#define SAS_RPORT_ATTRS 8 #define SAS_END_DEV_ATTRS 5 #define SAS_EXPANDER_ATTRS 7 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index d16441961f3a..92ffd2406f97 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1105,7 +1105,7 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) return 0; - if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83) + if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) return 0; return S_IRUGO; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 80520e2f0fa2..b6f958193dad 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols); sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); +sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32); /* only need 8 bytes of data plus header (4 or 8) */ #define BUF_SIZE 64 @@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft) SETUP_RPORT_ATTRIBUTE(rphy_device_type); SETUP_RPORT_ATTRIBUTE(rphy_sas_address); SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier); + SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier, get_enclosure_identifier); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier, diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 5f4530744e0a..097894a1fab5 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -37,6 +37,7 @@ config SCSI_UFSHCD depends on SCSI && SCSI_DMA select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND + select NLS ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4f38d008bfb4..3aedf73f1131 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. + * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,8 +16,8 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> - #include <linux/phy/phy-qcom-ufs.h> + #include "ufshcd.h" #include "ufshcd-pltfrm.h" #include "unipro.h" @@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, len * 4, false); } +static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, + char *prefix, void *priv) +{ + ufs_qcom_dump_regs(hba, offset, len, prefix); +} + static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) { int err = 0; @@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) if (!host->is_lane_clks_enabled) return; - clk_disable_unprepare(host->tx_l1_sync_clk); + if (host->hba->lanes_per_direction > 1) + clk_disable_unprepare(host->tx_l1_sync_clk); clk_disable_unprepare(host->tx_l0_sync_clk); - clk_disable_unprepare(host->rx_l1_sync_clk); + if (host->hba->lanes_per_direction > 1) + clk_disable_unprepare(host->rx_l1_sync_clk); clk_disable_unprepare(host->rx_l0_sync_clk); host->is_lane_clks_enabled = false; @@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) if (err) goto disable_rx_l0; - err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", - host->rx_l1_sync_clk); - if (err) - goto disable_tx_l0; + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", + host->rx_l1_sync_clk); + if (err) + goto disable_tx_l0; - err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", - host->tx_l1_sync_clk); - if (err) - goto disable_rx_l1; + err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", + host->tx_l1_sync_clk); + if (err) + goto disable_rx_l1; + } host->is_lane_clks_enabled = true; goto out; disable_rx_l1: - clk_disable_unprepare(host->rx_l1_sync_clk); + if (host->hba->lanes_per_direction > 1) + clk_disable_unprepare(host->rx_l1_sync_clk); disable_tx_l0: clk_disable_unprepare(host->tx_l0_sync_clk); disable_rx_l0: @@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) if (err) goto out; - err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", - &host->rx_l1_sync_clk); - if (err) - goto out; - - err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", - &host->tx_l1_sync_clk); + /* In case of single lane per direction, don't read lane1 clocks */ + if (host->hba->lanes_per_direction > 1) { + err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", + &host->rx_l1_sync_clk); + if (err) + goto out; + err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", + &host->tx_l1_sync_clk); + } out: return err; } @@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); if (ret) { - dev_err(hba->dev, - "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n", - __func__, ret); + dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", + __func__, ret); goto out; } @@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); + /* + * Some UFS devices (and maybe the host) have issues if LCC is + * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 + * before link startup, which will make sure that both host + * and device TX LCC are disabled once link startup is + * completed.
+ */ + if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), + 0); + break; case POST_CHANGE: ufs_qcom_link_startup_post_change(hba); @@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, goto out; } + /* enable the device ref clock before changing to HS mode */ + if (!ufshcd_is_hs_mode(&hba->pwr_info) && + ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, true); break; case POST_CHANGE: if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, @@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); ufs_qcom_update_bus_bw_vote(host); + + /* disable the device ref clock if entered PWM mode */ + if (ufshcd_is_hs_mode(&hba->pwr_info) && + !ufshcd_is_hs_mode(dev_req_params)) + ufs_qcom_dev_ref_clk_ctrl(host, false); break; default: ret = -EINVAL; @@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) ufs_qcom_phy_disable_iface_clk(host->generic_phy); goto out; } + /* enable the device ref clock for HS mode*/ + if (ufshcd_is_hs_mode(&hba->pwr_info)) + ufs_qcom_dev_ref_clk_ctrl(host, true); vote = host->bus_vote.saved_vote; if (vote == host->bus_vote.min_bw_vote) ufs_qcom_update_bus_bw_vote(host); @@ -1367,6 +1403,74 @@ out: return err; } +static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, + void *priv, void (*print_fn)(struct ufs_hba *hba, + int offset, int num_regs, char *str, void *priv)) +{ + u32 reg; + struct ufs_qcom_host *host; + + if (unlikely(!hba)) { + pr_err("%s: hba is NULL\n", __func__); + return; + } + if (unlikely(!print_fn)) { + dev_err(hba->dev, "%s: print_fn is NULL\n", __func__); + return; + } + + host = ufshcd_get_variant(hba); + if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN)) + return; + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC); + print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); + + reg = ufshcd_readl(hba, REG_UFS_CFG1); + reg |= UFS_BIT(17); + ufshcd_writel(hba, reg, REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); + print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM); + print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM); + print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); + + ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM); + print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC); + print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC); + print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC); + print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT); + print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv); + + reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); + print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv); +} + +static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) +{ + if (host->dbg_print_en 
& UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); + else + ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); +} + static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) { /* provide a legal default configuration */ @@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) ufshcd_rmwl(host->hba, mask, (u32)host->testbus.select_minor << offset, reg); + ufs_qcom_enable_test_bus(host); ufshcd_release(host->hba); pm_runtime_put_sync(host->hba->dev); @@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, "HCI Vendor Specific Registers "); + ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); ufs_qcom_testbus_read(hba); } + /** * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev) * ufs_qcom_remove - set driver_data of the device to NULL * @pdev: pointer to platform device handle * - * Always return 0 + * Always returns 0 */ static int ufs_qcom_remove(struct platform_device *pdev) { diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 36249b35f858..a19307a57ce2 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -241,6 +241,15 @@ struct ufs_qcom_host { struct ufs_qcom_testbus testbus; }; +static inline u32 +ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg) +{ + if (host->hw_ver.major <= 0x02) + return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg); + + return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg); +}; + #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 54a16cef0367..b291fa6ed2ad 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -43,6 +43,7 @@ #define GENERAL_UPIU_REQUEST_SIZE 32 #define QUERY_DESC_MAX_SIZE 255 #define QUERY_DESC_MIN_SIZE 2 +#define QUERY_DESC_HDR_SIZE 2 #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ (sizeof(struct utp_upiu_header))) @@ -195,6 +196,37 @@ enum unit_desc_param { UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, }; +/* Device descriptor parameters offsets in bytes*/ +enum device_desc_param { + DEVICE_DESC_PARAM_LEN = 0x0, + DEVICE_DESC_PARAM_TYPE = 0x1, + DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2, + DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3, + DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4, + DEVICE_DESC_PARAM_PRTCL = 0x5, + DEVICE_DESC_PARAM_NUM_LU = 0x6, + DEVICE_DESC_PARAM_NUM_WLU = 0x7, + DEVICE_DESC_PARAM_BOOT_ENBL = 0x8, + DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9, + DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA, + DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB, + DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC, + DEVICE_DESC_PARAM_SEC_LU = 0xD, + DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE, + DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF, + DEVICE_DESC_PARAM_SPEC_VER = 0x10, + DEVICE_DESC_PARAM_MANF_DATE = 0x12, + DEVICE_DESC_PARAM_MANF_NAME = 0x14, + DEVICE_DESC_PARAM_PRDCT_NAME = 0x15, + DEVICE_DESC_PARAM_SN = 0x16, + DEVICE_DESC_PARAM_OEM_ID = 0x17, + DEVICE_DESC_PARAM_MANF_ID = 0x18, + DEVICE_DESC_PARAM_UD_OFFSET = 0x1A, + DEVICE_DESC_PARAM_UD_LEN = 0x1B, + DEVICE_DESC_PARAM_RTT_CAP = 0x1C, + DEVICE_DESC_PARAM_FRQ_RTC = 0x1D, +}; + /* * Logical Unit Write Protect * 00h: LU not write protected @@ -469,6 +501,7 @@ struct ufs_vreg { struct regulator *reg; const char *name; 
bool enabled; + bool unused; int min_uV; int max_uV; int min_uA; diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h new file mode 100644 index 000000000000..ee4ab85e2801 --- /dev/null +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UFS_QUIRKS_H_ +#define _UFS_QUIRKS_H_ + +/* return true if s1 is a prefix of s2 */ +#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1)) + +#define UFS_ANY_VENDOR 0xFFFF +#define UFS_ANY_MODEL "ANY_MODEL" + +#define MAX_MODEL_LEN 16 + +#define UFS_VENDOR_TOSHIBA 0x198 +#define UFS_VENDOR_SAMSUNG 0x1CE + +/** + * ufs_device_info - ufs device details + * @wmanufacturerid: card details + * @model: card model + */ +struct ufs_device_info { + u16 wmanufacturerid; + char model[MAX_MODEL_LEN + 1]; +}; + +/** + * ufs_dev_fix - ufs device quirk info + * @card: ufs card details + * @quirk: device quirk + */ +struct ufs_dev_fix { + struct ufs_device_info card; + unsigned int quirk; +}; + +#define END_FIX { { 0 }, 0 } + +/* add specific device quirk */ +#define UFS_FIX(_vendor, _model, _quirk) \ + { \ + .card.wmanufacturerid = (_vendor),\ + .card.model = (_model), \ + .quirk = (_quirk), \ + } + +/* + * If a UFS device has issues processing LCC (Line Control + * Command) coming from the UFS host controller, enable this quirk. + * When this quirk is enabled, the host controller driver should disable + * LCC transmission on the UFS host controller (by clearing the + * TX_LCC_ENABLE attribute of the host to 0). + */ +#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0) + +/* + * Some UFS devices don't need the VCCQ rail for device operations. Enabling + * this quirk for such devices will make sure that the VCCQ rail is not voted. + */ +#define UFS_DEVICE_NO_VCCQ (1 << 1) + +/* + * Some vendors' UFS devices send back-to-back NACs for the DL data frames, + * causing the host controller to raise the DFES error status. Sometimes + * such UFS devices send back-to-back NACs without waiting for a new + * retransmitted DL frame from the host, and in such cases the host UniPro + * may go into a bad state without raising the DFES error + * interrupt. If this happens, all the pending commands would time out + * only after the respective SW command timeout expires (which is generally + * quite long). + * + * We can work around such device behaviour like this: + * - As soon as SW sees the DL NAC error, it should schedule the error handler. + * - The error handler would sleep for 50ms to see if there are any fatal errors + * raised by the UFS controller. + * - If there are fatal errors, SW does normal error recovery. + * - If there are no fatal errors, SW sends the NOP command to the device + * to check if the link is alive. + * - If the NOP command times out, SW does normal error recovery. + * - If the NOP command succeeds, skip the error handling.
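+ * + * A condensed sketch of that recovery flow (it mirrors + * ufshcd_quirk_dl_nac_errors() in ufshcd.c; error-bit names as used there): + * + * msleep(50); + * if (hba->saved_err & INT_FATAL_ERRORS) + * return true; (do normal error recovery) + * if (ufshcd_verify_dev_init(hba)) + * return true; (NOP timed out) + * hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + * return false; (skip the error handler)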
+ * + * If the DL NAC error is seen multiple times with some vendors' UFS devices, + * then enable this quirk to initiate quick error recovery and also silence + * related error logs to reduce spamming of kernel logs. + */ +#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2) + +/* + * Some UFS devices may not work properly after resume if the link was kept + * in off state during suspend. Enabling this quirk will not allow the + * link to be kept in off state during suspend. + */ +#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3) + +/* + * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as + * 600us, which may not be enough for a reliable hibern8 exit hardware sequence + * from the UFS device. + * To work around this issue, the host should set its PA_TACTIVATE time to 1ms + * even if the device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms. + */ +#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4) + +/* + * Some UFS memory devices may have really low read/write throughput in + * FAST AUTO mode; enable this quirk to make sure that FAST AUTO mode is + * never enabled for such devices. + */ +#define UFS_DEVICE_NO_FASTAUTO (1 << 5) + +/* + * It seems some UFS devices may keep drawing more than sleep current + * (at least for 500us) from UFS rails (especially from the VCCQ rail). + * To avoid this situation, add a 2ms delay before putting these UFS + * rails in LPM mode. + */ +#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6) + +struct ufs_hba; +void ufs_advertise_fixup_device(struct ufs_hba *hba); + +static struct ufs_dev_fix ufs_fixups[] = { + /* UFS cards deviations table */ + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), + UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_NO_FASTAUTO), + UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", + UFS_DEVICE_QUIRK_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", + UFS_DEVICE_QUIRK_PA_TACTIVATE), + + END_FIX +}; +#endif /* UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index d2a7b127b05c..718f12e09885 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -40,6 +40,8 @@ #include "ufshcd.h" #include "ufshcd-pltfrm.h" +#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2 + static int ufshcd_parse_clock_info(struct ufs_hba *hba) { int ret = 0; @@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown); +static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + int ret; + + ret = of_property_read_u32(dev->of_node, "lanes-per-direction", + &hba->lanes_per_direction); + if (ret) { + dev_dbg(hba->dev, + "%s: failed to read lanes-per-direction, ret=%d\n", + __func__, ret); + hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION; + } +} + /** * ufshcd_pltfrm_init - probe routine of the driver * @pdev: pointer to Platform device handle @@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); + ufshcd_init_lanes_per_dir(hba); + err = ufshcd_init(hba, mmio_base, irq); if (err) { dev_err(dev, "Initialization failed\n"); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index
9c1b94bef8f3..f8fa72c31a9d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -39,8 +39,10 @@ #include <linux/async.h> #include <linux/devfreq.h> - +#include <linux/nls.h> +#include <linux/of.h> #include "ufshcd.h" +#include "ufs_quirks.h" #include "unipro.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ @@ -131,9 +133,11 @@ enum { /* UFSHCD UIC layer error flags */ enum { UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ - UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ - UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ - UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ + UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ }; /* Interrupt configuration options */ @@ -193,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused); static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -231,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) } } +/* replace non-printable or non-ASCII characters with spaces */ +static inline void ufshcd_remove_non_printable(char *val) +{ + if (!val) + return; + + if (*val < 0x20 || *val > 0x7e) + *val = ' '; +} + /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface @@ -239,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) * @val - wait condition * @interval_us - polling interval in microsecs * @timeout_ms - timeout in millisecs + * @can_sleep - perform sleep or just spin * * Returns -ETIMEDOUT on error, zero on success */ -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, unsigned long timeout_ms) +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, + unsigned long timeout_ms, bool can_sleep) { int err = 0; unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); @@ -252,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, val = val & mask; while ((ufshcd_readl(hba, reg) & mask) != val) { - /* wakeup within 50us of expiry */ - usleep_range(interval_us, interval_us + 50); - + if (can_sleep) + usleep_range(interval_us, interval_us + 50); + else + udelay(interval_us); if (time_after(jiffies, timeout)) { if ((ufshcd_readl(hba, reg) & mask) != val) err = -ETIMEDOUT; @@ -552,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 
0 : 1; } +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) +{ + /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ + if ((hba->ufs_version == UFSHCI_VERSION_10) || + (hba->ufs_version == UFSHCI_VERSION_11)) + return UFS_UNIPRO_VER_1_41; + else + return UFS_UNIPRO_VER_1_6; +} +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); + +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) +{ + /* + * If both host and device support UniPro ver1.6 or later, PA layer + * parameters tuning happens during link startup itself. + * + * We can manually tune PA layer parameters if either host or device + * doesn't support UniPro ver 1.6 or later. But to keep manual tuning + * logic simple, we will only do manual tuning if local unipro version + * doesn't support ver1.6 or later. + */ + if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) + return true; + else + return false; +} + static void ufshcd_ungate_work(struct work_struct *work) { int ret; @@ -1458,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag) */ err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, - mask, ~mask, 1000, 1000); + mask, ~mask, 1000, 1000, true); return err; } @@ -1857,21 +1903,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba, return ret; } -/** - * ufshcd_query_descriptor - API function for sending descriptor requests - * hba: per-adapter instance - * opcode: attribute opcode - * idn: attribute idn to access - * index: index field - * selector: selector field - * desc_buf: the buffer that contains the descriptor - * buf_len: length parameter passed to the device - * - * Returns 0 for success, non-zero in case of failure. - * The buf_len parameter will contain, on return, the length parameter - * received on the response. - */ -static int ufshcd_query_descriptor(struct ufs_hba *hba, +static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len) { @@ -1936,6 +1968,39 @@ out: } /** + * ufshcd_query_descriptor_retry - API function for sending descriptor + * requests + * hba: per-adapter instance + * opcode: attribute opcode + * idn: attribute idn to access + * index: index field + * selector: selector field + * desc_buf: the buffer that contains the descriptor + * buf_len: length parameter passed to the device + * + * Returns 0 for success, non-zero in case of failure. + * The buf_len parameter will contain, on return, the length parameter + * received on the response. 
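+ * + * A hypothetical call reading the device descriptor (desc_buf and buf_len + * are illustrative locals, not part of this API): + * + * u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + * int buf_len = sizeof(desc_buf); + * err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + * QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, &buf_len);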
+ */ +int ufshcd_query_descriptor_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum desc_idn idn, u8 index, + u8 selector, u8 *desc_buf, int *buf_len) +{ + int err; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + err = __ufshcd_query_descriptor(hba, opcode, idn, index, + selector, desc_buf, buf_len); + if (!err || err == -EINVAL) + break; + } + + return err; +} +EXPORT_SYMBOL(ufshcd_query_descriptor_retry); + +/** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance * @desc_id: descriptor idn value @@ -1977,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, return -ENOMEM; } - ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, desc_buf, + &buff_len); if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || (desc_buf[QUERY_DESC_LENGTH_OFFSET] != @@ -2017,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba, return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); } +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); +} +EXPORT_SYMBOL(ufshcd_read_device_desc); + +/** + * ufshcd_read_string_desc - read string descriptor + * @hba: pointer to adapter instance + * @desc_index: descriptor index + * @buf: pointer to buffer where descriptor would be read + * @size: size of buf + * @ascii: if true convert from unicode to ascii characters + * + * Return 0 in case of success, non-zero otherwise + */ +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, + u32 size, bool ascii) +{ + int err = 0; + + err = ufshcd_read_desc(hba, + QUERY_DESC_IDN_STRING, desc_index, buf, size); + + if (err) { + dev_err(hba->dev, "%s: reading String Desc failed after %d retries. 
err = %d\n", + __func__, QUERY_REQ_RETRIES, err); + goto out; + } + + if (ascii) { + int desc_len; + int ascii_len; + int i; + char *buff_ascii; + + desc_len = buf[0]; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; + if (size < ascii_len + QUERY_DESC_HDR_SIZE) { + dev_err(hba->dev, "%s: buffer allocated size is too small\n", + __func__); + err = -ENOMEM; + goto out; + } + + buff_ascii = kmalloc(ascii_len, GFP_KERNEL); + if (!buff_ascii) { + err = -ENOMEM; + goto out_free_buff; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], + desc_len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ascii_len; i++) + ufshcd_remove_non_printable(&buff_ascii[i]); + + memset(buf + QUERY_DESC_HDR_SIZE, 0, + size - QUERY_DESC_HDR_SIZE); + memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); + buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; +out_free_buff: + kfree(buff_ascii); + } +out: + return err; +} +EXPORT_SYMBOL(ufshcd_read_string_desc); + /** * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter * @hba: Pointer to adapter instance @@ -2814,6 +2955,23 @@ out: } /** + * ufshcd_hba_stop - Send controller to reset state + * @hba: per adapter instance + * @can_sleep: perform sleep or just spin + */ +static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) +{ + int err; + + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10, 1, can_sleep); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} + +/** * ufshcd_hba_enable - initialize the controller * @hba: per adapter instance * @@ -2833,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) * development and testing of this driver. msleep can be changed to * mdelay and retry count can be reduced based on the controller. */ - if (!ufshcd_is_hba_active(hba)) { - + if (!ufshcd_is_hba_active(hba)) /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); - - /* - * This delay is based on the testing done with UFS host - * controller FPGA. The delay can be changed based on the - * host controller used. - */ - msleep(5); - } + ufshcd_hba_stop(hba, true); /* UniPro link is disabled at this point */ ufshcd_set_link_off(hba); @@ -3365,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) } /** - * ufshcd_transfer_req_compl - handle SCSI and query command completion + * __ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance + * @completed_reqs: requests to complete */ -static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, + unsigned long completed_reqs) { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; - unsigned long completed_reqs; - u32 tr_doorbell; int result; int index; - /* Resetting interrupt aggregation counters first and reading the - * DOOR_BELL afterward allows us to handle all the completed requests. - * In order to prevent other interrupts starvation the DB is read once - * after reset. 
The down side of this solution is the possibility of - false interrupt if device completes another request after resetting - aggregation and before reading the DB. - */ - if (ufshcd_is_intr_aggr_allowed(hba)) - ufshcd_reset_intr_aggr(hba); - - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - for_each_set_bit(index, &completed_reqs, hba->nutrs) { lrbp = &hba->lrb[index]; cmd = lrbp->cmd; @@ -3419,6 +3555,31 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) } /** + * ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + */ +static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +{ + unsigned long completed_reqs; + u32 tr_doorbell; + + /* Resetting interrupt aggregation counters first and reading the + * DOOR_BELL afterward allows us to handle all the completed requests. + * In order to prevent other interrupts starvation the DB is read once + * after reset. The down side of this solution is the possibility of + * false interrupt if device completes another request after resetting + * aggregation and before reading the DB. + */ + if (ufshcd_is_intr_aggr_allowed(hba)) + ufshcd_reset_intr_aggr(hba); + + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = tr_doorbell ^ hba->outstanding_reqs; + + __ufshcd_transfer_req_compl(hba, completed_reqs); +} + +/** * ufshcd_disable_ee - disable exception event * @hba: per-adapter instance * @mask: exception event to disable @@ -3630,7 +3791,7 @@ out: */ static int ufshcd_urgent_bkops(struct ufs_hba *hba) { - return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); + return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); } static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) @@ -3639,6 +3800,43 @@ static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); } +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) +{ + int err; + u32 curr_status = 0; + + if (hba->is_urgent_bkops_lvl_checked) + goto enable_auto_bkops; + + err = ufshcd_get_bkops_status(hba, &curr_status); + if (err) { + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", + __func__, err); + goto out; + } + + /* + * We are seeing that some devices are raising the urgent bkops + * exception events even when BKOPS status doesn't indicate that + * performance is impacted or critical. Handle such devices by + * determining their urgent bkops status at runtime.
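+ * + * For example, a device that raises the urgent exception while reporting + * BKOPS_STATUS_NON_CRITICAL ends up with + * + * hba->urgent_bkops_lvl = BKOPS_STATUS_NON_CRITICAL; + * + * so later urgent events enable auto-bkops at that device-specific + * threshold instead of the default BKOPS_STATUS_PERF_IMPACT.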
+ */ + if (curr_status < BKOPS_STATUS_PERF_IMPACT) { + dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", + __func__, curr_status); + /* update the current status as the urgent bkops level */ + hba->urgent_bkops_lvl = curr_status; + hba->is_urgent_bkops_lvl_checked = true; + } + +enable_auto_bkops: + err = ufshcd_enable_auto_bkops(hba); +out: + if (err < 0) + dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", + __func__, err); +} + /** * ufshcd_exception_event_handler - handle exceptions raised by device * @work: pointer to work data @@ -3662,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work) } status &= hba->ee_ctrl_mask; - if (status & MASK_EE_URGENT_BKOPS) { - err = ufshcd_urgent_bkops(hba); - if (err < 0) - dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", - __func__, err); - } + + if (status & MASK_EE_URGENT_BKOPS) + ufshcd_bkops_exception_event_handler(hba); + out: pm_runtime_put_sync(hba->dev); return; } +/* Complete requests that have door-bell cleared */ +static void ufshcd_complete_requests(struct ufs_hba *hba) +{ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); +} + +/** + * ufshcd_quirk_dl_nac_errors - check whether error handling is required + * to recover from the DL NAC errors + * @hba: per-adapter instance + * + * Returns true if error handling is required, false otherwise + */ +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) +{ + unsigned long flags; + bool err_handling = true; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around + * device fatal errors and/or DL NAC & REPLAY timeout errors. + */ + if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) + goto out; + + if ((hba->saved_err & DEVICE_FATAL_ERROR) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) + goto out; + + if ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { + int err; + /* + * wait for 50ms to see if any other errors show up. + */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + msleep(50); + spin_lock_irqsave(hba->host->host_lock, flags); + + /* + * now check whether any other severe errors occurred besides + * the DL NAC error. + */ + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) + goto out; + + /* + * As DL NAC is the only error received so far, send out a NOP + * command to confirm whether the link is still active. + * - If we don't get any response, do error recovery. + * - If we get a response, clear the DL NAC error bit.
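+ * (ufshcd_verify_dev_init(), called below, implements this probe: it sends + * a NOP OUT UPIU and succeeds only if the device answers with NOP IN.)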
+ */ + + spin_unlock_irqrestore(hba->host->host_lock, flags); + err = ufshcd_verify_dev_init(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + + if (err) + goto out; + + /* Link seems to be alive hence ignore the DL NAC errors */ + if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) + hba->saved_err &= ~UIC_ERROR; + /* clear NAC error */ + hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + if (!hba->saved_uic_err) { + err_handling = false; + goto out; + } + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err_handling; +} + /** * ufshcd_err_handler - handle UFS errors that require s/w attention * @work: pointer to work structure @@ -3685,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work) u32 err_tm = 0; int err = 0; int tag; + bool needs_reset = false; hba = container_of(work, struct ufs_hba, eh_work); @@ -3692,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work) ufshcd_hold(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ufshcd_state == UFSHCD_STATE_RESET) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_RESET) goto out; - } hba->ufshcd_state = UFSHCD_STATE_RESET; ufshcd_set_eh_in_progress(hba); /* Complete requests that have door-bell cleared by h/w */ - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_complete_requests(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + bool ret; + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ + ret = ufshcd_quirk_dl_nac_errors(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + if (!ret) + goto skip_err_handling; + } + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR | + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | + UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) + needs_reset = true; + + /* + * if host reset is required then skip clearing the pending + * transfers forcefully because they will automatically get + * cleared after link startup. 
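+ * (ufshcd_clear_cmd()/ufshcd_clear_tm_cmd() below each poll their doorbell + * bit for up to a second, so skipping them before a full host reset also + * avoids a long, pointless wait.)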
+ */ + if (needs_reset) + goto skip_pending_xfer_clear; + + /* release lock as clear command might sleep */ + spin_unlock_irqrestore(hba->host->host_lock, flags); /* Clear pending transfer requests */ - for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) - if (ufshcd_clear_cmd(hba, tag)) - err_xfer |= 1 << tag; + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + if (ufshcd_clear_cmd(hba, tag)) { + err_xfer = true; + goto lock_skip_pending_xfer_clear; + } + } /* Clear pending task management requests */ - for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) - if (ufshcd_clear_tm_cmd(hba, tag)) - err_tm |= 1 << tag; + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { + if (ufshcd_clear_tm_cmd(hba, tag)) { + err_tm = true; + goto lock_skip_pending_xfer_clear; + } + } - /* Complete the requests that are cleared by s/w */ +lock_skip_pending_xfer_clear: spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Complete the requests that are cleared by s/w */ + ufshcd_complete_requests(hba); + + if (err_xfer || err_tm) + needs_reset = true; + +skip_pending_xfer_clear: /* Fatal errors need reset */ - if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || - ((hba->saved_err & UIC_ERROR) && - (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { + if (needs_reset) { + unsigned long max_doorbells = (1UL << hba->nutrs) - 1; + + /* + * ufshcd_reset_and_restore() does the link reinitialization + * which will need at least one empty doorbell slot to send the + * device management commands (NOP and query commands). + * If there is no empty slot at this moment, then free up the + * last slot forcefully. + */ + if (hba->outstanding_reqs == max_doorbells) + __ufshcd_transfer_req_compl(hba, + (1UL << (hba->nutrs - 1))); + + spin_unlock_irqrestore(hba->host->host_lock, flags); err = ufshcd_reset_and_restore(hba); + spin_lock_irqsave(hba->host->host_lock, flags); if (err) { dev_err(hba->dev, "%s: reset and restore failed\n", __func__); @@ -3739,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work) hba->saved_err = 0; hba->saved_uic_err = 0; } + +skip_err_handling: + if (!needs_reset) { + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + if (hba->saved_err || hba->saved_uic_err) + dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", + __func__, hba->saved_err, hba->saved_uic_err); + } + ufshcd_clear_eh_in_progress(hba); out: + spin_unlock_irqrestore(hba->host->host_lock, flags); scsi_unblock_requests(hba->host); ufshcd_release(hba); pm_runtime_put_sync(hba->dev); @@ -3759,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba) reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } /* UIC NL/TL/DME errors needs software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); @@ -3796,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba) } if (queue_eh_work) { + /* + * update the transfer error masks to sticky bits; let's do this + * irrespective of the current ufshcd_state.
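+ * For example, if a UIC error interrupt fires while eh_work for an earlier + * fatal error is still pending, the handler sees the union of both events: + * + * hba->saved_err == CONTROLLER_FATAL_ERROR | UIC_ERROR + * hba->saved_uic_err == UFSHCD_UIC_DL_PA_INIT_ERROR (for instance)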
+ */ + hba->saved_err |= hba->errors; + hba->saved_uic_err |= hba->uic_error; + /* handle fatal errors only when link is functional */ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { /* block commands from scsi mid-layer */ scsi_block_requests(hba->host); - /* transfer error masks to sticky bits */ - hba->saved_err |= hba->errors; - hba->saved_uic_err |= hba->uic_error; - hba->ufshcd_state = UFSHCD_STATE_ERROR; schedule_work(&hba->eh_work); } @@ -3897,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, REG_UTP_TASK_REQ_DOOR_BELL, - mask, 0, 1000, 1000); + mask, 0, 1000, 1000, true); out: return err; } @@ -4179,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) /* Reset the host controller */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, false); spin_unlock_irqrestore(hba->host->host_lock, flags); err = ufshcd_hba_enable(hba); @@ -4466,6 +4810,164 @@ out: return ret; } +static int ufs_get_device_info(struct ufs_hba *hba, + struct ufs_device_info *card_data) +{ + int err; + u8 model_index; + u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0}; + u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + + err = ufshcd_read_device_desc(hba, desc_buf, + QUERY_DESC_DEVICE_MAX_SIZE); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, + QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); + if (err) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; + strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), + min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], + MAX_MODEL_LEN)); + + /* Null terminate the model string */ + card_data->model[MAX_MODEL_LEN] = '\0'; + +out: + return err; +} + +void ufs_advertise_fixup_device(struct ufs_hba *hba) +{ + int err; + struct ufs_dev_fix *f; + struct ufs_device_info card_data; + + card_data.wmanufacturerid = 0; + + err = ufs_get_device_info(hba, &card_data); + if (err) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, err); + return; + } + + for (f = ufs_fixups; f->quirk; f++) { + if (((f->card.wmanufacturerid == card_data.wmanufacturerid) || + (f->card.wmanufacturerid == UFS_ANY_VENDOR)) && + (STR_PRFX_EQUAL(f->card.model, card_data.model) || + !strcmp(f->card.model, UFS_ANY_MODEL))) + hba->dev_quirks |= f->quirk; + } +} + +/** + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro + * @hba: per-adapter instance + * + * PA_TActivate parameter can be tuned manually if UniPro version is less than + * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce + * the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure.
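+ * + * Worked example, assuming the 100us RX_MIN_ACTIVATETIME granularity and + * the 10us PA_TActivate granularity that the unit conversion below relies + * on: a peer advertising 6 (600us) yields + * + * tuned_pa_tactivate = (6 * 100) / 10 = 60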
+ */ +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_min_activatetime); + if (ret) + goto out; + + /* make sure proper unit conversion is applied */ + tuned_pa_tactivate = + ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) + / PA_TACTIVATE_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + tuned_pa_tactivate); + +out: + return ret; +} + +/** + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro + * @hba: per-adapter instance + * + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than + * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's + * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. + * This optimal value can help reduce the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) +{ + int ret = 0; + u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; + u32 max_hibern8_time, tuned_pa_hibern8time; + + ret = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &local_tx_hibern8_time_cap); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_hibern8_time_cap); + if (ret) + goto out; + + max_hibern8_time = max(local_tx_hibern8_time_cap, + peer_rx_hibern8_time_cap); + /* make sure proper unit conversion is applied */ + tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) + / PA_HIBERN8_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + tuned_pa_hibern8time); +out: + return ret; +} + +static void ufshcd_tune_unipro_params(struct ufs_hba *hba) +{ + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { + ufshcd_tune_pa_tactivate(hba); + ufshcd_tune_pa_hibern8time(hba); + } + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) + /* set 1ms timeout for PA_TACTIVATE */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -4482,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_init_pwr_info(hba); + /* set the default level for urgent bkops */ + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; + hba->is_urgent_bkops_lvl_checked = false; + /* UniPro link is active now */ ufshcd_set_link_active(hba); @@ -4493,6 +4999,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + ufs_advertise_fixup_device(hba); + ufshcd_tune_unipro_params(hba); + + ret = ufshcd_set_vccq_rail_unused(hba, + (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? 
+	if (ret)
+		goto out;
+
 	/* UFS device is also active now */
 	ufshcd_set_ufs_dev_active(hba);
 	ufshcd_force_reset_auto_bkops(hba);
@@ -4567,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 	ufshcd_probe_hba(hba);
 }

+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+	unsigned long flags;
+	struct Scsi_Host *host;
+	struct ufs_hba *hba;
+	int index;
+	bool found = false;
+
+	if (!scmd || !scmd->device || !scmd->device->host)
+		return BLK_EH_NOT_HANDLED;
+
+	host = scmd->device->host;
+	hba = shost_priv(host);
+	if (!hba)
+		return BLK_EH_NOT_HANDLED;
+
+	spin_lock_irqsave(host->host_lock, flags);
+
+	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+		if (hba->lrb[index].cmd == scmd) {
+			found = true;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	/*
+	 * Bypass SCSI error handling and reset the block layer timer if this
+	 * SCSI command was not actually dispatched to the UFS driver;
+	 * otherwise let the SCSI layer handle the error as usual.
+	 */
+	return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
+}
+
 static struct scsi_host_template ufshcd_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= UFSHCD,
@@ -4579,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = {
 	.eh_abort_handler	= ufshcd_abort,
 	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
 	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
+	.eh_timed_out		= ufshcd_eh_timed_out,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
@@ -4607,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
 {
-	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+	if (!vreg)
+		return 0;
+	else if (vreg->unused)
+		return 0;
+	else
+		return ufshcd_config_vreg_load(hba->dev, vreg,
+					       UFS_VREG_LPM_LOAD_UA);
 }

 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg)
 {
-	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+	if (!vreg)
+		return 0;
+	else if (vreg->unused)
+		return 0;
+	else
+		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }

 static int ufshcd_config_vreg(struct device *dev,
@@ -4648,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
 {
 	int ret = 0;

-	if (!vreg || vreg->enabled)
+	if (!vreg)
+		goto out;
+	else if (vreg->enabled || vreg->unused)
 		goto out;

 	ret = ufshcd_config_vreg(dev, vreg, true);
@@ -4668,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
 {
 	int ret = 0;

-	if (!vreg || !vreg->enabled)
+	if (!vreg)
+		goto out;
+	else if (!vreg->enabled || vreg->unused)
 		goto out;

 	ret = regulator_disable(vreg->reg);
@@ -4774,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
 	return 0;
 }

+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+	int ret = 0;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	if (!info)
+		goto out;
+	else if (!info->vccq)
+		goto out;
+
+	if (unused) {
+		/* shut off the rail here */
+		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+		/*
+		 * Mark this rail as no longer used, so it doesn't get enabled
+		 * later by mistake
+		 */
+		if (!ret)
+			info->vccq->unused = true;
+	} else {
+		/*
+		 * The rail should already be enabled here, so just make sure
+		 * that the unused flag is cleared.
+		 */
+		info->vccq->unused = false;
+	}
+out:
+	return ret;
+}
+
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
					bool skip_ref_clk)
 {
@@ -5093,10 +5688,20 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
		   (!check_for_bkops || (check_for_bkops &&
		    !hba->auto_bkops_enabled))) {
 		/*
+		 * Let's make sure that the link is in low power mode; we
+		 * currently do this by putting the link in Hibern8. Another
+		 * way to put the link in low power mode is to send a DME end
+		 * point reset to the device and then send the DME reset
+		 * command to the local UniPro. But putting the link in
+		 * Hibern8 is much faster.
+		 */
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret)
+			goto out;
+		/*
 		 * Change controller state to "reset state" which
 		 * should also put the link in off/reset state
 		 */
-		ufshcd_hba_stop(hba);
+		ufshcd_hba_stop(hba, true);
 		/*
 		 * TODO: Check if we need any delay to make sure that
 		 * controller is reset
@@ -5111,6 +5716,16 @@ out:
 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 {
 	/*
+	 * It seems some UFS devices may keep drawing more than sleep current
+	 * (at least for 500us) from the UFS rails (especially from the VCCQ
+	 * rail). To avoid this situation, add a 2ms delay before putting
+	 * these UFS rails in LPM mode.
+	 */
+	if (!ufshcd_is_link_active(hba) &&
+	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+		usleep_range(2000, 2100);
+
+	/*
 	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
 	 * power.
 	 *
@@ -5572,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
-	ufshcd_hba_stop(hba);
+	ufshcd_hba_stop(hba, true);

 	scsi_host_put(hba->host);

@@ -5836,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);

 	ufshcd_init_clk_gating(hba);
+
+	/*
+	 * In order to avoid any spurious interrupt immediately after
+	 * registering the UFS controller interrupt handler, clear any pending
+	 * UFS interrupt status and disable all the UFS interrupts.
+	 */
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+		      REG_INTERRUPT_STATUS);
+	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+	/*
+	 * Make sure that UFS interrupts are disabled and any pending interrupt
+	 * status is cleared before registering the UFS interrupt handler.
+	 */
+	mb();
+
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e3931d0c94eb..4bb65669f052 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -54,6 +54,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/regulator/consumer.h>
+#include "unipro.h"

 #include <asm/irq.h>
 #include <asm/byteorder.h>
@@ -383,6 +384,9 @@ struct ufs_init_prefetch {
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track of whether the urgent bkops
+ *  level for the device is known
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -470,6 +474,9 @@ struct ufs_hba {

 	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

+	/* Device deviations from standard UFS device spec. */
+	unsigned int dev_quirks;
+
 	wait_queue_head_t tm_wq;
 	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
@@ -509,6 +516,8 @@ struct ufs_hba {

 	bool wlun_dev_clr_ua;

+	/* Number of lanes available (1 or 2) for Rx/Tx */
+	u32 lanes_per_direction;
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;

@@ -533,6 +542,9 @@ struct ufs_hba {
 	struct devfreq *devfreq;
 	struct ufs_clk_scaling clk_scaling;
 	bool is_sys_suspended;
+
+	enum bkops_status urgent_bkops_lvl;
+	bool is_urgent_bkops_lvl_checked;
 };

 /* Returns true if clocks can be gated. Otherwise false */
@@ -588,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
 void ufshcd_dealloc_host(struct ufs_hba *);
 int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
 void ufshcd_remove(struct ufs_hba *);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
-	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
-}
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep);

 static inline void check_upiu_size(void)
 {
@@ -682,11 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }

+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+	return (pwr_info->pwr_rx == FAST_MODE ||
+		pwr_info->pwr_rx == FASTAUTO_MODE) &&
+	       (pwr_info->pwr_tx == FAST_MODE ||
+		pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+#define ASCII_STD true
+
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+				u32 size, bool ascii);
+
 /* Expose Query-Request API */
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, bool *flag_res);
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0ae0967aaed8..4cb1cc63f1a1 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -92,6 +92,7 @@ enum {
 	UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
 	UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
 	UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
+	UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
 };

 /*
@@ -170,6 +171,8 @@ enum {
 #define UIC_DATA_LINK_LAYER_ERROR		UFS_BIT(31)
 #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK	0x7FFF
 #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT	0x2000
+#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED	0x0001
+#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002

 /* UECN - Host UIC Error Code Network Layer 40h */
 #define UIC_NETWORK_LAYER_ERROR			UFS_BIT(31)
@@ -209,6 +212,7 @@ enum {

 /* GenSelectorIndex calculation macros for M-PHY attributes */
 #define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))

 #define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
					 ((sel) & 0xFFFF))
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 816a8a46efb8..e2854e45f8d3 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -15,6 +15,7 @@
 /*
  * M-TX Configuration Attributes
  */
+#define TX_HIBERN8TIME_CAPABILITY	0x000F
 #define TX_MODE				0x0021
 #define TX_HSRATE_SERIES		0x0022
 #define TX_HSGEAR			0x0023
@@ -48,8 +49,12 @@
 #define RX_ENTER_HIBERN8		0x00A7
 #define RX_BYPASS_8B10B_ENABLE		0x00A8
 #define RX_TERMINATION_FORCE_ENABLE	0x0089
+#define RX_MIN_ACTIVATETIME_CAPABILITY	0x008F
+#define RX_HIBERN8TIME_CAPABILITY	0x0092

 #define is_mphy_tx_attr(attr)		(attr < RX_MODE)
+#define RX_MIN_ACTIVATETIME_UNIT_US	100
+#define HIBERN8TIME_UNIT_US		100
 /*
  * PHY Adpater attributes
  */
@@ -70,6 +75,7 @@
 #define PA_MAXRXSPEEDFAST	0x1541
 #define PA_MAXRXSPEEDSLOW	0x1542
 #define PA_TXLINKSTARTUPHS	0x1544
+#define PA_LOCAL_TX_LCC_ENABLE	0x155E
 #define PA_TXSPEEDFAST		0x1565
 #define PA_TXSPEEDSLOW		0x1566
 #define PA_REMOTEVERINFO	0x15A0
@@ -110,6 +116,12 @@
 #define PA_STALLNOCONFIGTIME	0x15A3
 #define PA_SAVECONFIGTIME	0x15A4

+#define PA_TACTIVATE_TIME_UNIT_US	10
+#define PA_HIBERN8_TIME_UNIT_US		100
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES	4
+
 /* PA power modes */
 enum {
 	FAST_MODE	= 1,
@@ -143,6 +155,16 @@ enum ufs_hs_gear_tag {
 	UFS_HS_G3,		/* HS Gear 3 */
 };

+enum ufs_unipro_ver {
+	UFS_UNIPRO_VER_RESERVED = 0,
+	UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+	UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+	UFS_UNIPRO_VER_1_6 = 3,  /* UniPro version 1.6 */
+	UFS_UNIPRO_VER_MAX = 4,  /* UniPro unsupported version */
+	/* UniPro version field mask in PA_LOCALVERINFO */
+	UFS_UNIPRO_VER_MASK = 0xF,
+};
+
 /*
  * Data Link Layer Attributes
  */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index bceb81309787..85365672c931 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1141,14 +1141,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	return PTR_ERR_OR_ZERO(vaddr);
 }

-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-				       enum dma_data_direction direction)
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;

 	mutex_lock(&buffer->lock);
 	ion_buffer_kmap_put(buffer);
 	mutex_unlock(&buffer->lock);
+
+	return 0;
 }

 static struct dma_buf_ops dma_buf_ops = {
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 9d47c5db24a6..163f21a1298d 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -49,7 +49,6 @@ static struct nand_ecclayout spinand_oob_64 = {
		   17, 18, 19, 20, 21, 22,
		   33, 34, 35, 36, 37, 38,
		   49, 50, 51, 52, 53, 54, },
-	.oobavail = 32,
 	.oobfree = {
		{.offset = 8,
			.length = 8},
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
index ae62975cf44a..457dc7ffdaf1 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.h
@@ -78,7 +78,6 @@
 #define BL_ALL_UNLOCKED 0

 struct spinand_info {
-	struct nand_ecclayout *ecclayout;
	struct spi_device *spi;
	void *priv;
 };
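
The quirk-table walk added in ufs_advertise_fixup_device() ORs in a fixup's quirks when the manufacturer ID matches (or the entry uses the any-vendor sentinel) and the fixup's model string is a prefix of the device model (or the any-model sentinel). A minimal user-space sketch of that matching, assuming hypothetical table entries and sentinel values; the real table (ufs_fixups) and the STR_PRFX_EQUAL macro are defined by the driver, not here:

#include <stdio.h>
#include <string.h>

#define UFS_ANY_VENDOR	0xFFFF		/* assumed sentinel values */
#define UFS_ANY_MODEL	"ANY_MODEL"
/* match when the fixup's model is a prefix of the device's model */
#define STR_PRFX_EQUAL(s1, s2)	(!strncmp((s1), (s2), strlen(s1)))

struct ufs_dev_fix {
	unsigned int wmanufacturerid;
	const char *model;
	unsigned int quirk;
};

/* hypothetical table for illustration only */
static const struct ufs_dev_fix fixups[] = {
	{ 0x1AD,          UFS_ANY_MODEL, 0x1 }, /* vendor-wide quirk */
	{ UFS_ANY_VENDOR, "XYZ",         0x2 }, /* model-prefix quirk */
	{ 0, NULL, 0 },				/* quirk == 0 terminates */
};

static unsigned int collect_quirks(unsigned int vid, const char *model)
{
	unsigned int quirks = 0;
	const struct ufs_dev_fix *f;

	for (f = fixups; f->quirk; f++)
		if ((f->wmanufacturerid == vid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		    (STR_PRFX_EQUAL(f->model, model) ||
		     !strcmp(f->model, UFS_ANY_MODEL)))
			quirks |= f->quirk;
	return quirks;
}

int main(void)
{
	/* both entries match this device: expect 0x3 */
	printf("quirks = %#x\n", collect_quirks(0x1AD, "XYZ123"));
	return 0;
}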
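The tuning helpers convert capability values between attribute granularities: RX_MIN_ACTIVATETIME_CAPABILITY and the HIBERN8TIME capabilities are reported in 100us units, while PA_TACTIVATE is programmed in 10us units and PA_HIBERN8TIME in 100us units, per the *_UNIT_US constants added to unipro.h. A standalone sketch of that arithmetic, with made-up capability values:

#include <stdio.h>

#define RX_MIN_ACTIVATETIME_UNIT_US	100	/* peer cap granularity */
#define PA_TACTIVATE_TIME_UNIT_US	10	/* PA_TACTIVATE granularity */
#define HIBERN8TIME_UNIT_US		100
#define PA_HIBERN8_TIME_UNIT_US		100

static unsigned int tune_tactivate(unsigned int peer_rx_min_activatetime)
{
	/* convert the peer capability into PA_TACTIVATE units */
	return (peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) /
		PA_TACTIVATE_TIME_UNIT_US;
}

static unsigned int tune_hibern8time(unsigned int local_tx_cap,
				     unsigned int peer_rx_cap)
{
	/* PA_Hibern8Time must cover both ends of the link */
	unsigned int max_cap = local_tx_cap > peer_rx_cap ?
				local_tx_cap : peer_rx_cap;

	return (max_cap * HIBERN8TIME_UNIT_US) / PA_HIBERN8_TIME_UNIT_US;
}

int main(void)
{
	/* e.g. peer advertises 5 x 100us = 500us minimum activate time */
	printf("PA_TACTIVATE   = %u (x10us)\n", tune_tactivate(5));
	/* local TX cap 3, peer RX cap 4, both in 100us units */
	printf("PA_HIBERN8TIME = %u (x100us)\n", tune_hibern8time(3, 4));
	return 0;
}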
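ufshcd_eh_timed_out() lets the block layer keep waiting (BLK_EH_RESET_TIMER) when the timed-out command was never handed to the controller, i.e. its tag is not set in hba->outstanding_reqs; only commands the controller actually owns fall through to SCSI error handling. A plain-C sketch of that bitmap-plus-ownership check, with NUTRS and the lrb array standing in for the driver's state:

#include <stdbool.h>
#include <stdio.h>

#define NUTRS 32			/* assumed queue depth for the sketch */

static void *lrb_cmd[NUTRS];		/* stand-in for hba->lrb[i].cmd */

static bool cmd_is_outstanding(unsigned long outstanding_reqs, void *scmd)
{
	int tag;

	/* open-coded for_each_set_bit() over the outstanding-request bitmap */
	for (tag = 0; tag < NUTRS; tag++)
		if ((outstanding_reqs & (1UL << tag)) && lrb_cmd[tag] == scmd)
			return true;
	return false;
}

int main(void)
{
	int dummy;

	lrb_cmd[3] = &dummy;
	/* tag 3 is set and owned by our command -> let SCSI EH handle it */
	printf("found = %d\n", cmd_is_outstanding(1UL << 3, &dummy));
	return 0;
}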
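The ufshcd_init() change acks any pending interrupt status (the UFSHCI IS register is write-1-to-clear) and masks every source before devm_request_irq(), so a stale event cannot fire as a spurious IRQ the instant the handler is registered. A self-contained sketch of the pattern over a fake MMIO window; the 0x20/0x24 offsets follow ufshci.h, while the accessors and array are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define REG_INTERRUPT_STATUS	0x20	/* UFSHCI IS, write-1-to-clear */
#define REG_INTERRUPT_ENABLE	0x24	/* UFSHCI IE */

static uint32_t regs[0x100 / 4];	/* fake MMIO space for the sketch */

static uint32_t mmio_read(uint32_t off)
{
	return regs[off / 4];
}

static void mmio_write(uint32_t val, uint32_t off)
{
	if (off == REG_INTERRUPT_STATUS)
		regs[off / 4] &= ~val;	/* model write-1-to-clear */
	else
		regs[off / 4] = val;
}

int main(void)
{
	regs[REG_INTERRUPT_STATUS / 4] = 0x5;	/* pretend IRQs are pending */

	/* ack anything already pending, then mask every source */
	mmio_write(mmio_read(REG_INTERRUPT_STATUS), REG_INTERRUPT_STATUS);
	mmio_write(0, REG_INTERRUPT_ENABLE);
	/* the driver issues mb() here to order this before request_irq() */

	printf("IS=%#x IE=%#x\n", mmio_read(REG_INTERRUPT_STATUS),
	       mmio_read(REG_INTERRUPT_ENABLE));
	return 0;
}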
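The extra can_sleep parameter threaded through ufshcd_wait_for_register() chooses between sleeping and busy-waiting between polls, so the same helper can serve callers in atomic context. A user-space sketch of such a poll loop, assuming a fake register that clears after a few reads and a monotonic-clock deadline:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint32_t fake_reg = 0x1;		/* stand-in hardware register */
static int polls;

static uint32_t read_reg(void)
{
	/* pretend the controller clears the bit after a few polls */
	if (++polls >= 3)
		fake_reg = 0;
	return fake_reg;
}

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/*
 * Poll until (reg & mask) == val or timeout_ms expires; sleep between
 * polls only when can_sleep is set (the driver would udelay() otherwise).
 */
static int wait_for_register(uint32_t mask, uint32_t val,
			     unsigned long interval_us,
			     unsigned long timeout_ms, bool can_sleep)
{
	uint64_t deadline = now_us() + (uint64_t)timeout_ms * 1000;

	while ((read_reg() & mask) != val) {
		if (now_us() > deadline)
			return (read_reg() & mask) != val ? -1 : 0;
		if (can_sleep)
			usleep(interval_us);
	}
	return 0;
}

int main(void)
{
	printf("err = %d after %d polls\n",
	       wait_for_register(0x1, 0, 1000, 1000, true), polls);
	return 0;
}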