Diffstat (limited to 'drivers/mtd/nand')
42 files changed, 3941 insertions, 1137 deletions
diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig index ae0b8fe5b990..572b8fe69abb 100644 --- a/drivers/mtd/nand/onenand/Kconfig +++ b/drivers/mtd/nand/onenand/Kconfig @@ -25,7 +25,7 @@ config MTD_ONENAND_GENERIC config MTD_ONENAND_OMAP2 tristate "OneNAND on OMAP2/OMAP3 support" - depends on ARCH_OMAP2 || ARCH_OMAP3 + depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM) depends on OF || COMPILE_TEST help Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC @@ -33,12 +33,12 @@ config MTD_ONENAND_OMAP2 Enable dmaengine and gpiolib for better performance. config MTD_ONENAND_SAMSUNG - tristate "OneNAND on Samsung SOC controller support" - depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 - help - Support for a OneNAND flash device connected to an Samsung SOC. - S3C64XX uses command mapping method. - S5PC110/S5PC210 use generic OneNAND method. + tristate "OneNAND on Samsung SOC controller support" + depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 || COMPILE_TEST + help + Support for a OneNAND flash device connected to an Samsung SOC. + S3C64XX uses command mapping method. + S5PC110/S5PC210 use generic OneNAND method. config MTD_ONENAND_OTP bool "OneNAND OTP Support" diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile index f8b624aca9cc..a0761c7e0288 100644 --- a/drivers/mtd/nand/onenand/Makefile +++ b/drivers/mtd/nand/onenand/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o # Board specific. obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o -obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o -obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o +obj-$(CONFIG_MTD_ONENAND_OMAP2) += onenand_omap2.o +obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += onenand_samsung.o onenand-objs = onenand_base.o onenand_bbt.o diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index e082d632fb74..d5326d19b136 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, stats = mtd->ecc_stats; - /* Read-while-load method */ + /* Read-while-load method */ - /* Do first load to bufferRAM */ - if (read < len) { - if (!onenand_check_bufferram(mtd, from)) { + /* Do first load to bufferRAM */ + if (read < len) { + if (!onenand_check_bufferram(mtd, from)) { this->command(mtd, ONENAND_CMD_READ, from, writesize); - ret = this->wait(mtd, FL_READING); - onenand_update_bufferram(mtd, from, !ret); + ret = this->wait(mtd, FL_READING); + onenand_update_bufferram(mtd, from, !ret); if (mtd_is_eccerr(ret)) ret = 0; - } - } + } + } thislen = min_t(int, writesize, len - read); column = from & (writesize - 1); if (column + thislen > writesize) thislen = writesize - column; - while (!ret) { - /* If there is more to load then start next load */ - from += thislen; - if (read + thislen < len) { + while (!ret) { + /* If there is more to load then start next load */ + from += thislen; + if (read + thislen < len) { this->command(mtd, ONENAND_CMD_READ, from, writesize); - /* - * Chip boundary handling in DDP - * Now we issued chip 1 read and pointed chip 1 + /* + * Chip boundary handling in DDP + * Now we issued chip 1 read and pointed chip 1 * bufferram so we have to point chip 0 bufferram. 
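The read loop above implements OneNAND's "read-while-load" pipelining: while the controller loads the next page into one of its two internal BufferRAMs, the CPU drains the page that has already landed in the other, with ONENAND_SET_PREV/NEXT_BUFFERRAM() flipping between the two. A minimal user-space sketch of that pattern, assuming a toy device model; dev_start_load() and dev_wait() are hypothetical stand-ins for this->command() and this->wait():

#include <stdio.h>
#include <string.h>

#define PAGES    4
#define PAGESIZE 8

/* Toy device: flash pages and two BufferRAMs, as on OneNAND. */
static char flash[PAGES][PAGESIZE] = { "page0", "page1", "page2", "page3" };
static char bufferram[2][PAGESIZE];

static void dev_start_load(int page, int slot)
{
        /* Models this->command(mtd, ONENAND_CMD_READ, ...). */
        memcpy(bufferram[slot], flash[page], PAGESIZE);
}

static void dev_wait(void)
{
        /* Models this->wait(mtd, FL_READING); the toy load is instant. */
}

int main(void)
{
        char out[PAGES * PAGESIZE];
        int page = 0, slot = 0;

        dev_start_load(page, slot);             /* first load */
        dev_wait();
        for (;;) {
                /* Kick the next load into the other BufferRAM... */
                if (page + 1 < PAGES)
                        dev_start_load(page + 1, slot ^ 1);
                /* ...and drain the one that just finished meanwhile. */
                memcpy(out + page * PAGESIZE, bufferram[slot], PAGESIZE);
                if (++page == PAGES)
                        break;
                slot ^= 1;
                dev_wait();                     /* wait for in-flight load */
        }
        printf("%.5s ... %.5s\n", out, out + 3 * PAGESIZE);
        return 0;
}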
- */ - if (ONENAND_IS_DDP(this) && - unlikely(from == (this->chipsize >> 1))) { - this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2); - boundary = 1; - } else - boundary = 0; - ONENAND_SET_PREV_BUFFERRAM(this); - } - /* While load is going, read from last bufferRAM */ - this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); + */ + if (ONENAND_IS_DDP(this) && + unlikely(from == (this->chipsize >> 1))) { + this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2); + boundary = 1; + } else + boundary = 0; + ONENAND_SET_PREV_BUFFERRAM(this); + } + /* While load is going, read from last bufferRAM */ + this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); /* Read oob area if needed */ if (oobbuf) { @@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, oobcolumn = 0; } - /* See if we are done */ - read += thislen; - if (read == len) - break; - /* Set up for next read from bufferRAM */ - if (unlikely(boundary)) - this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2); - ONENAND_SET_NEXT_BUFFERRAM(this); - buf += thislen; + /* See if we are done */ + read += thislen; + if (read == len) + break; + /* Set up for next read from bufferRAM */ + if (unlikely(boundary)) + this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2); + ONENAND_SET_NEXT_BUFFERRAM(this); + buf += thislen; thislen = min_t(int, writesize, len - read); - column = 0; - cond_resched(); - /* Now wait for load */ - ret = this->wait(mtd, FL_READING); - onenand_update_bufferram(mtd, from, !ret); + column = 0; + cond_resched(); + /* Now wait for load */ + ret = this->wait(mtd, FL_READING); + onenand_update_bufferram(mtd, from, !ret); if (mtd_is_eccerr(ret)) ret = 0; - } + } /* * Return success, if no ECC failures, else -EBADMSG @@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); status &= 0x60; @@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); } else { ops.mode = MTD_OPS_PLACE_OOB; ops.ooblen = len; @@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd) this->boundary[die] = bdry & FLEXONENAND_PI_MASK; this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); printk(KERN_INFO "Die %d boundary: %d%s\n", die, this->boundary[die], locked ? 
"(Locked)" : "(Unlocked)"); @@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die, ret = this->wait(mtd, FL_WRITING); out: this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); if (!ret) /* Recalculate device size on boundary change*/ flexonenand_get_size(mtd); @@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd) /* Reset OneNAND to read default register values */ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM); /* Wait reset */ - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); /* Restore system configuration 1 */ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); @@ -3880,6 +3880,9 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) if (!this->oob_buf) { if (this->options & ONENAND_PAGEBUF_ALLOC) { this->options &= ~ONENAND_PAGEBUF_ALLOC; +#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE + kfree(this->verify_buf); +#endif kfree(this->page_buf); } return -ENOMEM; diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c index edf94ee54ec7..aa9368bf7a0c 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/onenand_omap2.c @@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) unsigned long timeout; u32 syscfg; - if (state == FL_RESETING || state == FL_PREPARING_ERASE || + if (state == FL_RESETTING || state == FL_PREPARING_ERASE || state == FL_VERIFYING_ERASE) { int i = 21; unsigned int intr_flags = ONENAND_INT_MASTER; switch (state) { - case FL_RESETING: + case FL_RESETTING: intr_flags |= ONENAND_INT_RESET; break; case FL_PREPARING_ERASE: @@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; - tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0); + tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!tx) { dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); return -EIO; @@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; xtra = count & 3; @@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); @@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev) c->gpmc_cs, c->phys_base, c->onenand.base, c->dma_chan ? 
"DMA" : "PIO"); - if ((r = onenand_scan(&c->mtd, 1)) < 0) + r = onenand_scan(&c->mtd, 1); + if (r < 0) goto err_release_dma; freq = omap2_onenand_get_freq(c->onenand.version_id); diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c index 55e5536a5850..87b28e397d67 100644 --- a/drivers/mtd/nand/onenand/samsung.c +++ b/drivers/mtd/nand/onenand/onenand_samsung.c @@ -248,7 +248,7 @@ static unsigned short s3c_onenand_readw(void __iomem *addr) } /* BootRAM access control */ - if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) { + if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) { if (word_addr == 0) return s3c_read_reg(MANUFACT_ID_OFFSET); if (word_addr == 1) @@ -289,7 +289,7 @@ static void s3c_onenand_writew(unsigned short value, void __iomem *addr) } /* BootRAM access control */ - if ((unsigned int)addr < ONENAND_DATARAM) { + if ((unsigned long)addr < ONENAND_DATARAM) { if (value == ONENAND_CMD_READID) { onenand->bootram_command = 1; return; @@ -658,7 +658,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE); } if (dma_mapping_error(dev, dma_dst)) { - dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count); + dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count); goto normal; } err = s5pc110_dma_ops(dma_dst, dma_src, @@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, normal: if (count != mtd->writesize) { /* Copy the bufferram to memory to prevent unaligned access */ - memcpy(this->page_buf, p, mtd->writesize); - p = this->page_buf + offset; + memcpy_fromio(this->page_buf, p, mtd->writesize); + memcpy(buffer, this->page_buf + offset, count); + } else { + memcpy_fromio(buffer, p, count); } - memcpy(buffer, p, count); - return 0; } @@ -728,13 +728,12 @@ static void s3c_onenand_check_lock_status(struct mtd_info *mtd) struct onenand_chip *this = mtd->priv; struct device *dev = &onenand->pdev->dev; unsigned int block, end; - int tmp; end = this->chipsize >> this->erase_shift; for (block = 0; block < end; block++) { unsigned int mem_addr = onenand->mem_addr(block, 0, 0); - tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr)); + s3c_read_cmd(CMD_MAP_01(onenand, mem_addr)); if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) { dev_err(dev, "block %d is write-protected!\n", block); diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 5a711d8beaca..a80a46bb5b8b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -351,14 +351,6 @@ config MTD_NAND_SOCRATES help Enables support for NAND Flash chips wired onto Socrates board. -config MTD_NAND_NUC900 - tristate "Nuvoton NUC9xx/w90p910 NAND controller" - depends on ARCH_W90X900 || COMPILE_TEST - depends on HAS_IOMEM - help - This enables the driver for the NAND Flash on evaluation board based - on w90p910 / NUC9xx. - source "drivers/mtd/nand/raw/ingenic/Kconfig" config MTD_NAND_FSMC @@ -407,6 +399,12 @@ config MTD_NAND_MTK Enables support for NAND controller on MTK SoCs. This controller is found on mt27xx, mt81xx, mt65xx SoCs. +config MTD_NAND_MXIC + tristate "Macronix raw NAND controller" + depends on HAS_IOMEM || COMPILE_TEST + help + This selects the Macronix raw NAND controller driver. + config MTD_NAND_TEGRA tristate "NVIDIA Tegra NAND controller" depends on ARCH_TEGRA || COMPILE_TEST @@ -452,6 +450,13 @@ config MTD_NAND_PLATFORM devices. 
You will need to provide platform-specific functions via platform_data. +config MTD_NAND_CADENCE + tristate "Support Cadence NAND (HPNFC) controller" + depends on (OF || COMPILE_TEST) && HAS_IOMEM + help + Enable the driver for NAND flash on platforms using a Cadence NAND + controller. + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index efaf5cd25edc..2d136b158fb7 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -41,7 +41,6 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o -obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o obj-$(CONFIG_MTD_NAND_RICOH) += r852.o @@ -54,9 +53,11 @@ obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o +obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o +obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 8d6be90a6fe8..3ba17a98df4d 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1578,9 +1578,8 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc, nand->numcs = numcs; - gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0, - &np->fwnode, GPIOD_IN, - "nand-det"); + gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np), + "det", GPIOD_IN, "nand-det"); if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) { dev_err(nc->dev, "Failed to get detect gpio (err = %ld)\n", @@ -1624,9 +1623,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc, nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB; nand->cs[i].rb.id = val; } else { - gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, - "rb", i, &np->fwnode, - GPIOD_IN, "nand-rb"); + gpio = devm_fwnode_gpiod_get_index(nc->dev, + of_fwnode_handle(np), + "rb", i, GPIOD_IN, + "nand-rb"); if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) { dev_err(nc->dev, "Failed to get R/B gpio (err = %ld)\n", @@ -1640,10 +1640,10 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc, } } - gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs", - i, &np->fwnode, - GPIOD_OUT_HIGH, - "nand-cs"); + gpio = devm_fwnode_gpiod_get_index(nc->dev, + of_fwnode_handle(np), + "cs", i, GPIOD_OUT_HIGH, + "nand-cs"); if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) { dev_err(nc->dev, "Failed to get CS gpio (err = %ld)\n", diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 97a97a9ccc36..75eb3e97fae3 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -134,16 +134,15 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) /** * au_read_buf16 - read chip data into buffer - * @mtd: MTD device structure + * @this: NAND chip object * @buf: buffer to store date * @len: number of bytes to read * * read function for 
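The onenand_samsung.c hunk earlier in this series replaces memcpy() with memcpy_fromio() for data coming out of the memory-mapped BufferRAM, and routes partial reads through the aligned page_buf bounce buffer instead of copying from an arbitrary offset. A standalone sketch of the fixed fallback path, with plain memcpy() standing in for memcpy_fromio() and WRITESIZE for mtd->writesize:

#include <assert.h>
#include <string.h>

#define WRITESIZE 4096

static char page_buf[WRITESIZE];        /* the driver's bounce buffer */

static void read_bufferram(const char *io, size_t offset,
                           char *buffer, size_t count)
{
        if (count != WRITESIZE) {
                /* Copy the whole page first to avoid unaligned IO access. */
                memcpy(page_buf, io, WRITESIZE);
                memcpy(buffer, page_buf + offset, count);
        } else {
                memcpy(buffer, io, count);
        }
}

int main(void)
{
        static char bufferram[WRITESIZE] = "0123456789";
        char out[4];

        read_bufferram(bufferram, 2, out, sizeof(out));
        assert(memcmp(out, "2345", 4) == 0);
        return 0;
}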
16bit buswidth */ -static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) +static void au_read_buf16(struct nand_chip *this, u_char *buf, int len) { int i; - struct nand_chip *this = mtd_to_nand(mtd); u16 *p = (u16 *) buf; len >>= 1; @@ -405,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev) goto out1; } - ctx->base = ioremap_nocache(r->start, 0x1000); + ctx->base = ioremap(r->start, 0x1000); if (!ctx->base) { dev_err(&pdev->dev, "cannot remap NAND memory area\n"); ret = -ENODEV; diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 33310b8a6eb8..44518dada75b 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -117,6 +117,18 @@ enum flash_dma_reg { FLASH_DMA_CURRENT_DESC_EXT, }; +/* flash_dma registers v0*/ +static const u16 flash_dma_regs_v0[] = { + [FLASH_DMA_REVISION] = 0x00, + [FLASH_DMA_FIRST_DESC] = 0x04, + [FLASH_DMA_CTRL] = 0x08, + [FLASH_DMA_MODE] = 0x0c, + [FLASH_DMA_STATUS] = 0x10, + [FLASH_DMA_INTERRUPT_DESC] = 0x14, + [FLASH_DMA_ERROR_STATUS] = 0x18, + [FLASH_DMA_CURRENT_DESC] = 0x1c, +}; + /* flash_dma registers v1*/ static const u16 flash_dma_regs_v1[] = { [FLASH_DMA_REVISION] = 0x00, @@ -597,6 +609,8 @@ static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl) /* flash_dma register offsets */ if (ctrl->nand_version >= 0x0703) ctrl->flash_dma_offsets = flash_dma_regs_v4; + else if (ctrl->nand_version == 0x0602) + ctrl->flash_dma_offsets = flash_dma_regs_v0; else ctrl->flash_dma_offsets = flash_dma_regs_v1; } @@ -918,7 +932,7 @@ static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl) return; if (has_flash_dma(ctrl)) { - ctrl->flash_dma_base = 0; + ctrl->flash_dma_base = NULL; disable_irq(ctrl->dma_irq); } @@ -1673,8 +1687,11 @@ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc) flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc)); (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC); - flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc)); - (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); + if (ctrl->nand_version > 0x0602) { + flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, + upper_32_bits(desc)); + (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); + } /* Start FLASH_DMA engine */ ctrl->dma_pending = true; @@ -1792,6 +1809,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, int bitflips = 0; int page = addr >> chip->page_shift; int ret; + void *ecc_chunk; if (!buf) buf = nand_get_data_buf(chip); @@ -1804,7 +1822,9 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, return ret; for (i = 0; i < chip->ecc.steps; i++, oob += sas) { - ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, + ecc_chunk = buf + chip->ecc.size * i; + ret = nand_check_erased_ecc_chunk(ecc_chunk, + chip->ecc.size, oob, sas, NULL, 0, chip->ecc.strength); if (ret < 0) @@ -2615,6 +2635,16 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) /* initialize the dma version */ brcmnand_flash_dma_revision_init(ctrl); + ret = -EIO; + if (ctrl->nand_version >= 0x0700) + ret = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(40)); + if (ret) + ret = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(32)); + if (ret) + goto err; + /* linked-list and stop on error */ flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK); flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); diff --git 
a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c new file mode 100644 index 000000000000..f6c7102a1e32 --- /dev/null +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -0,0 +1,3029 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Cadence NAND flash controller driver + * + * Copyright (C) 2019 Cadence + * + * Author: Piotr Sroka <piotrs@cadence.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of_device.h> +#include <linux/iopoll.h> + +/* + * HPNFC can work in 3 modes: + * - PIO - can work in master or slave DMA + * - CDMA - needs Master DMA for accessing command descriptors. + * - Generic mode - can use only slave DMA. + * CDMA and PIO modes can be used to execute only base commands. + * Generic mode can be used to execute any command + * on NAND flash memory. Driver uses CDMA mode for + * block erasing, page reading, page programing. + * Generic mode is used for executing rest of commands. + */ + +#define MAX_OOB_SIZE_PER_SECTOR 32 +#define MAX_ADDRESS_CYC 6 +#define MAX_ERASE_ADDRESS_CYC 3 +#define MAX_DATA_SIZE 0xFFFC +#define DMA_DATA_SIZE_ALIGN 8 + +/* Register definition. */ +/* + * Command register 0. + * Writing data to this register will initiate a new transaction + * of the NF controller. + */ +#define CMD_REG0 0x0000 +/* Command type field mask. */ +#define CMD_REG0_CT GENMASK(31, 30) +/* Command type CDMA. */ +#define CMD_REG0_CT_CDMA 0uL +/* Command type generic. */ +#define CMD_REG0_CT_GEN 3uL +/* Command thread number field mask. */ +#define CMD_REG0_TN GENMASK(27, 24) + +/* Command register 2. */ +#define CMD_REG2 0x0008 +/* Command register 3. */ +#define CMD_REG3 0x000C +/* Pointer register to select which thread status will be selected. */ +#define CMD_STATUS_PTR 0x0010 +/* Command status register for selected thread. */ +#define CMD_STATUS 0x0014 + +/* Interrupt status register. */ +#define INTR_STATUS 0x0110 +#define INTR_STATUS_SDMA_ERR BIT(22) +#define INTR_STATUS_SDMA_TRIGG BIT(21) +#define INTR_STATUS_UNSUPP_CMD BIT(19) +#define INTR_STATUS_DDMA_TERR BIT(18) +#define INTR_STATUS_CDMA_TERR BIT(17) +#define INTR_STATUS_CDMA_IDL BIT(16) + +/* Interrupt enable register. */ +#define INTR_ENABLE 0x0114 +#define INTR_ENABLE_INTR_EN BIT(31) +#define INTR_ENABLE_SDMA_ERR_EN BIT(22) +#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21) +#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19) +#define INTR_ENABLE_DDMA_TERR_EN BIT(18) +#define INTR_ENABLE_CDMA_TERR_EN BIT(17) +#define INTR_ENABLE_CDMA_IDLE_EN BIT(16) + +/* Controller internal state. */ +#define CTRL_STATUS 0x0118 +#define CTRL_STATUS_INIT_COMP BIT(9) +#define CTRL_STATUS_CTRL_BUSY BIT(8) + +/* Command Engine threads state. */ +#define TRD_STATUS 0x0120 + +/* Command Engine interrupt thread error status. */ +#define TRD_ERR_INT_STATUS 0x0128 +/* Command Engine interrupt thread error enable. */ +#define TRD_ERR_INT_STATUS_EN 0x0130 +/* Command Engine interrupt thread complete status. */ +#define TRD_COMP_INT_STATUS 0x0138 + +/* + * Transfer config 0 register. + * Configures data transfer parameters. + */ +#define TRAN_CFG_0 0x0400 +/* Offset value from the beginning of the page. */ +#define TRAN_CFG_0_OFFSET GENMASK(31, 16) +/* Numbers of sectors to transfer within singlNF device's page. */ +#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0) + +/* + * Transfer config 1 register. 
+ * Configures data transfer parameters. + */ +#define TRAN_CFG_1 0x0404 +/* Size of last data sector. */ +#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16) +/* Size of not-last data sector. */ +#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0) + +/* ECC engine configuration register 0. */ +#define ECC_CONFIG_0 0x0428 +/* Correction strength. */ +#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8) +/* Enable erased pages detection mechanism. */ +#define ECC_CONFIG_0_ERASE_DET_EN BIT(1) +/* Enable controller ECC check bits generation and correction. */ +#define ECC_CONFIG_0_ECC_EN BIT(0) + +/* ECC engine configuration register 1. */ +#define ECC_CONFIG_1 0x042C + +/* Multiplane settings register. */ +#define MULTIPLANE_CFG 0x0434 +/* Cache operation settings. */ +#define CACHE_CFG 0x0438 + +/* DMA settings register. */ +#define DMA_SETINGS 0x043C +/* Enable SDMA error report on access unprepared slave DMA interface. */ +#define DMA_SETINGS_SDMA_ERR_RSP BIT(17) + +/* Transferred data block size for the slave DMA module. */ +#define SDMA_SIZE 0x0440 + +/* Thread number associated with transferred data block + * for the slave DMA module. + */ +#define SDMA_TRD_NUM 0x0444 +/* Thread number mask. */ +#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0) + +#define CONTROL_DATA_CTRL 0x0494 +/* Thread number mask. */ +#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0) + +#define CTRL_VERSION 0x800 +#define CTRL_VERSION_REV GENMASK(7, 0) + +/* Available hardware features of the controller. */ +#define CTRL_FEATURES 0x804 +/* Support for NV-DDR2/3 work mode. */ +#define CTRL_FEATURES_NVDDR_2_3 BIT(28) +/* Support for NV-DDR work mode. */ +#define CTRL_FEATURES_NVDDR BIT(27) +/* Support for asynchronous work mode. */ +#define CTRL_FEATURES_ASYNC BIT(26) +/* Support for asynchronous work mode. */ +#define CTRL_FEATURES_N_BANKS GENMASK(25, 24) +/* Slave and Master DMA data width. */ +#define CTRL_FEATURES_DMA_DWITH64 BIT(21) +/* Availability of Control Data feature.*/ +#define CTRL_FEATURES_CONTROL_DATA BIT(10) + +/* BCH Engine identification register 0 - correction strengths. */ +#define BCH_CFG_0 0x838 +#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0) +#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8) +#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16) +#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24) + +/* BCH Engine identification register 1 - correction strengths. */ +#define BCH_CFG_1 0x83C +#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0) +#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8) +#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16) +#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24) + +/* BCH Engine identification register 2 - sector sizes. */ +#define BCH_CFG_2 0x840 +#define BCH_CFG_2_SECT_0 GENMASK(15, 0) +#define BCH_CFG_2_SECT_1 GENMASK(31, 16) + +/* BCH Engine identification register 3. */ +#define BCH_CFG_3 0x844 + +/* Ready/Busy# line status. */ +#define RBN_SETINGS 0x1004 + +/* Common settings. */ +#define COMMON_SET 0x1008 +/* 16 bit device connected to the NAND Flash interface. */ +#define COMMON_SET_DEVICE_16BIT BIT(8) + +/* Skip_bytes registers. */ +#define SKIP_BYTES_CONF 0x100C +#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16) +#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0) + +#define SKIP_BYTES_OFFSET 0x1010 +#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0) + +/* Timings configuration. 
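The register definitions above pack several fields into each 32-bit register, which the driver manipulates with GENMASK()/FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. A user-space sketch with stand-in macros, decoding the four BCH correction strengths from a hypothetical BCH_CFG_0 readout:

#include <stdint.h>
#include <stdio.h>

/* User-space approximations of the kernel's GENMASK()/FIELD_GET(). */
#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

/* BCH_CFG_0 holds four supported correction strengths, 8 bits each. */
#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)

int main(void)
{
        uint32_t reg = 0x28180c08;      /* hypothetical readout: 40/24/12/8 */

        printf("strengths: %u %u %u %u\n",
               FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg),
               FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg),
               FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg),
               FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg));
        return 0;
}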
*/ +#define ASYNC_TOGGLE_TIMINGS 0x101c +#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24) +#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16) +#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8) +#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0) + +#define TIMINGS0 0x1024 +#define TIMINGS0_TADL GENMASK(31, 24) +#define TIMINGS0_TCCS GENMASK(23, 16) +#define TIMINGS0_TWHR GENMASK(15, 8) +#define TIMINGS0_TRHW GENMASK(7, 0) + +#define TIMINGS1 0x1028 +#define TIMINGS1_TRHZ GENMASK(31, 24) +#define TIMINGS1_TWB GENMASK(23, 16) +#define TIMINGS1_TVDLY GENMASK(7, 0) + +#define TIMINGS2 0x102c +#define TIMINGS2_TFEAT GENMASK(25, 16) +#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8) +#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0) + +/* Configuration of the resynchronization of slave DLL of PHY. */ +#define DLL_PHY_CTRL 0x1034 +#define DLL_PHY_CTRL_DLL_RST_N BIT(24) +#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17) +#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16) +#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8) +#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0) + +/* Register controlling DQ related timing. */ +#define PHY_DQ_TIMING 0x2000 +/* Register controlling DSQ related timing. */ +#define PHY_DQS_TIMING 0x2004 +#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0) +#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16) +#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20) + +/* Register controlling the gate and loopback control related timing. */ +#define PHY_GATE_LPBK_CTRL 0x2008 +#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19) + +/* Register holds the control for the master DLL logic. */ +#define PHY_DLL_MASTER_CTRL 0x200C +#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23) + +/* Register holds the control for the slave DLL logic. */ +#define PHY_DLL_SLAVE_CTRL 0x2010 + +/* This register handles the global control settings for the PHY. */ +#define PHY_CTRL 0x2080 +#define PHY_CTRL_SDR_DQS BIT(14) +#define PHY_CTRL_PHONY_DQS GENMASK(9, 4) + +/* + * This register handles the global control settings + * for the termination selects for reads. + */ +#define PHY_TSEL 0x2084 + +/* Generic command layout. */ +#define GCMD_LAY_CS GENMASK_ULL(11, 8) +/* + * This bit informs the minicotroller if it has to wait for tWB + * after sending the last CMD/ADDR/DATA in the sequence. + */ +#define GCMD_LAY_TWB BIT_ULL(6) +/* Type of generic instruction. */ +#define GCMD_LAY_INSTR GENMASK_ULL(5, 0) + +/* Generic CMD sequence type. */ +#define GCMD_LAY_INSTR_CMD 0 +/* Generic ADDR sequence type. */ +#define GCMD_LAY_INSTR_ADDR 1 +/* Generic data transfer sequence type. */ +#define GCMD_LAY_INSTR_DATA 2 + +/* Input part of generic command type of input is command. */ +#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16) + +/* Generic command address sequence - address fields. */ +#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16) +/* Generic command address sequence - address size. */ +#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11) + +/* Transfer direction field of generic command data sequence. */ +#define GCMD_DIR BIT_ULL(11) +/* Read transfer direction of generic command data sequence. */ +#define GCMD_DIR_READ 0 +/* Write transfer direction of generic command data sequence. */ +#define GCMD_DIR_WRITE 1 + +/* ECC enabled flag of generic command data sequence - ECC enabled. */ +#define GCMD_ECC_EN BIT_ULL(12) +/* Generic command data sequence - sector size. */ +#define GCMD_SECT_SIZE GENMASK_ULL(31, 16) +/* Generic command data sequence - sector count. */ +#define GCMD_SECT_CNT GENMASK_ULL(39, 32) +/* Generic command data sequence - last sector size. 
*/ +#define GCMD_LAST_SIZE GENMASK_ULL(55, 40) + +/* CDMA descriptor fields. */ +/* Erase command type of CDMA descriptor. */ +#define CDMA_CT_ERASE 0x1000 +/* Program page command type of CDMA descriptor. */ +#define CDMA_CT_WR 0x2100 +/* Read page command type of CDMA descriptor. */ +#define CDMA_CT_RD 0x2200 + +/* Flash pointer memory shift. */ +#define CDMA_CFPTR_MEM_SHIFT 24 +/* Flash pointer memory mask. */ +#define CDMA_CFPTR_MEM GENMASK(26, 24) + +/* + * Command DMA descriptor flags. If set causes issue interrupt after + * the completion of descriptor processing. + */ +#define CDMA_CF_INT BIT(8) +/* + * Command DMA descriptor flags - the next descriptor + * address field is valid and descriptor processing should continue. + */ +#define CDMA_CF_CONT BIT(9) +/* DMA master flag of command DMA descriptor. */ +#define CDMA_CF_DMA_MASTER BIT(10) + +/* Operation complete status of command descriptor. */ +#define CDMA_CS_COMP BIT(15) +/* Operation complete status of command descriptor. */ +/* Command descriptor status - operation fail. */ +#define CDMA_CS_FAIL BIT(14) +/* Command descriptor status - page erased. */ +#define CDMA_CS_ERP BIT(11) +/* Command descriptor status - timeout occurred. */ +#define CDMA_CS_TOUT BIT(10) +/* + * Maximum amount of correction applied to one ECC sector. + * It is part of command descriptor status. + */ +#define CDMA_CS_MAXERR GENMASK(9, 2) +/* Command descriptor status - uncorrectable ECC error. */ +#define CDMA_CS_UNCE BIT(1) +/* Command descriptor status - descriptor error. */ +#define CDMA_CS_ERR BIT(0) + +/* Status of operation - OK. */ +#define STAT_OK 0 +/* Status of operation - FAIL. */ +#define STAT_FAIL 2 +/* Status of operation - uncorrectable ECC error. */ +#define STAT_ECC_UNCORR 3 +/* Status of operation - page erased. */ +#define STAT_ERASED 5 +/* Status of operation - correctable ECC error. */ +#define STAT_ECC_CORR 6 +/* Status of operation - unsuspected state. */ +#define STAT_UNKNOWN 7 +/* Status of operation - operation is not completed yet. */ +#define STAT_BUSY 0xFF + +#define BCH_MAX_NUM_CORR_CAPS 8 +#define BCH_MAX_NUM_SECTOR_SIZES 2 + +struct cadence_nand_timings { + u32 async_toggle_timings; + u32 timings0; + u32 timings1; + u32 timings2; + u32 dll_phy_ctrl; + u32 phy_ctrl; + u32 phy_dqs_timing; + u32 phy_gate_lpbk_ctrl; +}; + +/* Command DMA descriptor. */ +struct cadence_nand_cdma_desc { + /* Next descriptor address. */ + u64 next_pointer; + + /* Flash address is a 32-bit address comprising of BANK and ROW ADDR. */ + u32 flash_pointer; + /*field appears in HPNFC version 13*/ + u16 bank; + u16 rsvd0; + + /* Operation the controller needs to perform. */ + u16 command_type; + u16 rsvd1; + /* Flags for operation of this command. */ + u16 command_flags; + u16 rsvd2; + + /* System/host memory address required for data DMA commands. */ + u64 memory_pointer; + + /* Status of operation. */ + u32 status; + u32 rsvd3; + + /* Address pointer to sync buffer location. */ + u64 sync_flag_pointer; + + /* Controls the buffer sync mechanism. */ + u32 sync_arguments; + u32 rsvd4; + + /* Control data pointer. */ + u64 ctrl_data_ptr; +}; + +/* Interrupt status. */ +struct cadence_nand_irq_status { + /* Thread operation complete status. */ + u32 trd_status; + /* Thread operation error. */ + u32 trd_error; + /* Controller status. */ + u32 status; +}; + +/* Cadence NAND flash controller capabilities get from driver data. */ +struct cadence_nand_dt_devdata { + /* Skew value of the output signals of the NAND Flash interface. 
*/ + u32 if_skew; + /* It informs if slave DMA interface is connected to DMA engine. */ + unsigned int has_dma:1; +}; + +/* Cadence NAND flash controller capabilities read from registers. */ +struct cdns_nand_caps { + /* Maximum number of banks supported by hardware. */ + u8 max_banks; + /* Slave and Master DMA data width in bytes (4 or 8). */ + u8 data_dma_width; + /* Control Data feature supported. */ + bool data_control_supp; + /* Is PHY type DLL. */ + bool is_phy_type_dll; +}; + +struct cdns_nand_ctrl { + struct device *dev; + struct nand_controller controller; + struct cadence_nand_cdma_desc *cdma_desc; + /* IP capability. */ + const struct cadence_nand_dt_devdata *caps1; + struct cdns_nand_caps caps2; + u8 ctrl_rev; + dma_addr_t dma_cdma_desc; + u8 *buf; + u32 buf_size; + u8 curr_corr_str_idx; + + /* Register interface. */ + void __iomem *reg; + + struct { + void __iomem *virt; + dma_addr_t dma; + } io; + + int irq; + /* Interrupts that have happened. */ + struct cadence_nand_irq_status irq_status; + /* Interrupts we are waiting for. */ + struct cadence_nand_irq_status irq_mask; + struct completion complete; + /* Protect irq_mask and irq_status. */ + spinlock_t irq_lock; + + int ecc_strengths[BCH_MAX_NUM_CORR_CAPS]; + struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES]; + struct nand_ecc_caps ecc_caps; + + int curr_trans_type; + + struct dma_chan *dmac; + + u32 nf_clk_rate; + /* + * Estimated Board delay. The value includes the total + * round trip delay for the signals and is used for deciding on values + * associated with data read capture. + */ + u32 board_delay; + + struct nand_chip *selected_chip; + + unsigned long assigned_cs; + struct list_head chips; +}; + +struct cdns_nand_chip { + struct cadence_nand_timings timings; + struct nand_chip chip; + u8 nsels; + struct list_head node; + + /* + * part of oob area of NAND flash memory page. + * This part is available for user to read or write. + */ + u32 avail_oob_size; + + /* Sector size. There are few sectors per mtd->writesize */ + u32 sector_size; + u32 sector_count; + + /* Offset of BBM. */ + u8 bbm_offs; + /* Number of bytes reserved for BBM. */ + u8 bbm_len; + /* ECC strength index. 
*/ + u8 corr_str_idx; + + u8 cs[]; +}; + +struct ecc_info { + int (*calc_ecc_bytes)(int step_size, int strength); + int max_step_size; +}; + +static inline struct +cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip) +{ + return container_of(chip, struct cdns_nand_chip, chip); +} + +static inline struct +cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller) +{ + return container_of(controller, struct cdns_nand_ctrl, controller); +} + +static bool +cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf, + u32 buf_len) +{ + u8 data_dma_width = cdns_ctrl->caps2.data_dma_width; + + return buf && virt_addr_valid(buf) && + likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) && + likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN)); +} + +static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl, + u32 reg_offset, u32 timeout_us, + u32 mask, bool is_clear) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset, + val, !(val & mask) == is_clear, + 10, timeout_us); + + if (ret < 0) { + dev_err(cdns_ctrl->dev, + "Timeout while waiting for reg %x with mask %x is clear %d\n", + reg_offset, mask, is_clear); + } + + return ret; +} + +static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl, + bool enable) +{ + u32 reg; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0); + + if (enable) + reg |= ECC_CONFIG_0_ECC_EN; + else + reg &= ~ECC_CONFIG_0_ECC_EN; + + writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0); + + return 0; +} + +static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl, + u8 corr_str_idx) +{ + u32 reg; + + if (cdns_ctrl->curr_corr_str_idx == corr_str_idx) + return; + + reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0); + reg &= ~ECC_CONFIG_0_CORR_STR; + reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx); + writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0); + + cdns_ctrl->curr_corr_str_idx = corr_str_idx; +} + +static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl, + u8 strength) +{ + int i, corr_str_idx = -1; + + for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) { + if (cdns_ctrl->ecc_strengths[i] == strength) { + corr_str_idx = i; + break; + } + } + + return corr_str_idx; +} + +static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl, + u16 marker_value) +{ + u32 reg; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF); + reg &= ~SKIP_BYTES_MARKER_VALUE; + reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE, + marker_value); + + writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF); + + return 0; +} + +static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl, + u8 num_of_bytes, + u32 offset_value, + int enable) +{ + u32 reg, skip_bytes_offset; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + if (!enable) { + num_of_bytes = 0; + offset_value = 0; + } + + reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF); + reg &= ~SKIP_BYTES_NUM_OF_BYTES; + reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES, + num_of_bytes); + skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE, + offset_value); + + writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF); + writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + 
SKIP_BYTES_OFFSET); + + return 0; +} + +/* Functions enables/disables hardware detection of erased data */ +static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl, + bool enable, + u8 bitflips_threshold) +{ + u32 reg; + + reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0); + + if (enable) + reg |= ECC_CONFIG_0_ERASE_DET_EN; + else + reg &= ~ECC_CONFIG_0_ERASE_DET_EN; + + writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0); + writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1); +} + +static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl, + bool bit_bus16) +{ + u32 reg; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET); + + if (!bit_bus16) + reg &= ~COMMON_SET_DEVICE_16BIT; + else + reg |= COMMON_SET_DEVICE_16BIT; + writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET); + + return 0; +} + +static void +cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_status) +{ + writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS); + writel_relaxed(irq_status->trd_status, + cdns_ctrl->reg + TRD_COMP_INT_STATUS); + writel_relaxed(irq_status->trd_error, + cdns_ctrl->reg + TRD_ERR_INT_STATUS); +} + +static void +cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_status) +{ + irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS); + irq_status->trd_status = readl_relaxed(cdns_ctrl->reg + + TRD_COMP_INT_STATUS); + irq_status->trd_error = readl_relaxed(cdns_ctrl->reg + + TRD_ERR_INT_STATUS); +} + +static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_status) +{ + cadence_nand_read_int_status(cdns_ctrl, irq_status); + + return irq_status->status || irq_status->trd_status || + irq_status->trd_error; +} + +static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl) +{ + unsigned long flags; + + spin_lock_irqsave(&cdns_ctrl->irq_lock, flags); + memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status)); + memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask)); + spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags); +} + +/* + * This is the interrupt service routine. It handles all interrupts + * sent to this device. + */ +static irqreturn_t cadence_nand_isr(int irq, void *dev_id) +{ + struct cdns_nand_ctrl *cdns_ctrl = dev_id; + struct cadence_nand_irq_status irq_status; + irqreturn_t result = IRQ_NONE; + + spin_lock(&cdns_ctrl->irq_lock); + + if (irq_detected(cdns_ctrl, &irq_status)) { + /* Handle interrupt. */ + /* First acknowledge it. */ + cadence_nand_clear_interrupt(cdns_ctrl, &irq_status); + /* Status in the device context for someone to read. */ + cdns_ctrl->irq_status.status |= irq_status.status; + cdns_ctrl->irq_status.trd_status |= irq_status.trd_status; + cdns_ctrl->irq_status.trd_error |= irq_status.trd_error; + /* Notify anyone who cares that it happened. */ + complete(&cdns_ctrl->complete); + /* Tell the OS that we've handled this. 
*/ + result = IRQ_HANDLED; + } + spin_unlock(&cdns_ctrl->irq_lock); + + return result; +} + +static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_mask) +{ + writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status, + cdns_ctrl->reg + INTR_ENABLE); + + writel_relaxed(irq_mask->trd_error, + cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN); +} + +static void +cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_irq_status *irq_mask, + struct cadence_nand_irq_status *irq_status) +{ + unsigned long timeout = msecs_to_jiffies(10000); + unsigned long time_left; + + time_left = wait_for_completion_timeout(&cdns_ctrl->complete, + timeout); + + *irq_status = cdns_ctrl->irq_status; + if (time_left == 0) { + /* Timeout error. */ + dev_err(cdns_ctrl->dev, "timeout occurred:\n"); + dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n", + irq_status->status, irq_mask->status); + dev_err(cdns_ctrl->dev, + "\ttrd_status = 0x%x, trd_status mask = 0x%x\n", + irq_status->trd_status, irq_mask->trd_status); + dev_err(cdns_ctrl->dev, + "\t trd_error = 0x%x, trd_error mask = 0x%x\n", + irq_status->trd_error, irq_mask->trd_error); + } +} + +/* Execute generic command on NAND controller. */ +static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl, + u8 chip_nr, + u64 mini_ctrl_cmd) +{ + u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr); + mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF; + mini_ctrl_cmd_h = mini_ctrl_cmd >> 32; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + cadence_nand_reset_irq(cdns_ctrl); + + writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2); + writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3); + + /* Select generic command. */ + reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN); + /* Thread number. */ + reg |= FIELD_PREP(CMD_REG0_TN, 0); + + /* Issue command. */ + writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0); + + return 0; +} + +/* Wait for data on slave DMA interface. 
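cadence_nand_generic_cmd_send() above assembles a 64-bit mini-controller command word with FIELD_PREP() and hands it to the hardware as two 32-bit halves through CMD_REG2/CMD_REG3. A standalone sketch of that pack-and-split step; the GENMASK_ULL()/FIELD_PREP() macros below are user-space approximations of the kernel ones:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)     (((~0ull) >> (63 - (h))) & ((~0ull) << (l)))
#define FIELD_PREP(mask, val) (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Field layout from the driver: chip select and instruction type. */
#define GCMD_LAY_CS        GENMASK_ULL(11, 8)
#define GCMD_LAY_INSTR     GENMASK_ULL(5, 0)
#define GCMD_LAY_INSTR_CMD 0

static uint32_t cmd_reg2, cmd_reg3;     /* stand-ins for the MMIO registers */

int main(void)
{
        uint64_t cmd = FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_CMD) |
                       FIELD_PREP(GCMD_LAY_CS, 1);

        cmd_reg2 = cmd & 0xFFFFFFFF;    /* low word  -> CMD_REG2 */
        cmd_reg3 = cmd >> 32;           /* high word -> CMD_REG3 */
        printf("low=0x%08x high=0x%08x\n", cmd_reg2, cmd_reg3);
        return 0;
}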
*/ +static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl, + u8 *out_sdma_trd, + u32 *out_sdma_size) +{ + struct cadence_nand_irq_status irq_mask, irq_status; + + irq_mask.trd_status = 0; + irq_mask.trd_error = 0; + irq_mask.status = INTR_STATUS_SDMA_TRIGG + | INTR_STATUS_SDMA_ERR + | INTR_STATUS_UNSUPP_CMD; + + cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask); + cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status); + if (irq_status.status == 0) { + dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n"); + return -ETIMEDOUT; + } + + if (irq_status.status & INTR_STATUS_SDMA_TRIGG) { + *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE); + *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM); + *out_sdma_trd = + FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd); + } else { + dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n", + irq_status.status); + return -EIO; + } + + return 0; +} + +static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl) +{ + u32 reg; + + reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES); + + cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg); + + if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg)) + cdns_ctrl->caps2.data_dma_width = 8; + else + cdns_ctrl->caps2.data_dma_width = 4; + + if (reg & CTRL_FEATURES_CONTROL_DATA) + cdns_ctrl->caps2.data_control_supp = true; + + if (reg & (CTRL_FEATURES_NVDDR_2_3 + | CTRL_FEATURES_NVDDR)) + cdns_ctrl->caps2.is_phy_type_dll = true; +} + +/* Prepare CDMA descriptor. */ +static void +cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, + char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr, + dma_addr_t ctrl_data_ptr, u16 ctype) +{ + struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc; + + memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc)); + + /* Set fields for one descriptor. */ + cdma_desc->flash_pointer = flash_ptr; + if (cdns_ctrl->ctrl_rev >= 13) + cdma_desc->bank = nf_mem; + else + cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT); + + cdma_desc->command_flags |= CDMA_CF_DMA_MASTER; + cdma_desc->command_flags |= CDMA_CF_INT; + + cdma_desc->memory_pointer = mem_ptr; + cdma_desc->status = 0; + cdma_desc->sync_flag_pointer = 0; + cdma_desc->sync_arguments = 0; + + cdma_desc->command_type = ctype; + cdma_desc->ctrl_data_ptr = ctrl_data_ptr; +} + +static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl, + u32 desc_status) +{ + if (desc_status & CDMA_CS_ERP) + return STAT_ERASED; + + if (desc_status & CDMA_CS_UNCE) + return STAT_ECC_UNCORR; + + if (desc_status & CDMA_CS_ERR) { + dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n"); + return STAT_FAIL; + } + + if (FIELD_GET(CDMA_CS_MAXERR, desc_status)) + return STAT_ECC_CORR; + + return STAT_FAIL; +} + +static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc; + u8 status = STAT_BUSY; + + if (desc_ptr->status & CDMA_CS_FAIL) { + status = cadence_nand_check_desc_error(cdns_ctrl, + desc_ptr->status); + dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status); + } else if (desc_ptr->status & CDMA_CS_COMP) { + /* Descriptor finished with no errors. */ + if (desc_ptr->command_flags & CDMA_CF_CONT) { + dev_info(cdns_ctrl->dev, "DMA unsupported flag is set"); + status = STAT_UNKNOWN; + } else { + /* Last descriptor. 
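cadence_nand_check_desc_error() above maps the raw CDMA descriptor status word onto the driver's STAT_* codes, testing the erased-page bit before the ECC bits so an erased page is not reported as an ECC failure. The same decision ladder as a standalone sketch, using the bit definitions from earlier in this file:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

#define CDMA_CS_ERP    BIT(11)          /* page erased          */
#define CDMA_CS_MAXERR GENMASK(9, 2)    /* max ECC correction   */
#define CDMA_CS_UNCE   BIT(1)           /* uncorrectable ECC    */
#define CDMA_CS_ERR    BIT(0)           /* descriptor error     */

enum { STAT_OK = 0, STAT_FAIL = 2, STAT_ECC_UNCORR = 3,
       STAT_ERASED = 5, STAT_ECC_CORR = 6 };

static int check_desc_error(uint32_t desc_status)
{
        if (desc_status & CDMA_CS_ERP)
                return STAT_ERASED;
        if (desc_status & CDMA_CS_UNCE)
                return STAT_ECC_UNCORR;
        if (desc_status & CDMA_CS_ERR)
                return STAT_FAIL;
        if ((desc_status & CDMA_CS_MAXERR) >> 2)
                return STAT_ECC_CORR;
        return STAT_FAIL;
}

int main(void)
{
        printf("%d %d\n", check_desc_error(CDMA_CS_UNCE),
               check_desc_error(3 << 2));  /* 3 bitflips corrected */
        return 0;
}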
*/ + status = STAT_OK; + } + } + + return status; +} + +static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl, + u8 thread) +{ + u32 reg; + int status; + + /* Wait for thread ready. */ + status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS, + 1000000, + BIT(thread), true); + if (status) + return status; + + cadence_nand_reset_irq(cdns_ctrl); + + writel_relaxed((u32)cdns_ctrl->dma_cdma_desc, + cdns_ctrl->reg + CMD_REG2); + writel_relaxed(0, cdns_ctrl->reg + CMD_REG3); + + /* Select CDMA mode. */ + reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA); + /* Thread number. */ + reg |= FIELD_PREP(CMD_REG0_TN, thread); + /* Issue command. */ + writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0); + + return 0; +} + +/* Send SDMA command and wait for finish. */ +static u32 +cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl, + u8 thread) +{ + struct cadence_nand_irq_status irq_mask, irq_status = {0}; + int status; + + irq_mask.trd_status = BIT(thread); + irq_mask.trd_error = BIT(thread); + irq_mask.status = INTR_STATUS_CDMA_TERR; + + cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask); + + status = cadence_nand_cdma_send(cdns_ctrl, thread); + if (status) + return status; + + cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status); + + if (irq_status.status == 0 && irq_status.trd_status == 0 && + irq_status.trd_error == 0) { + dev_err(cdns_ctrl->dev, "CDMA command timeout\n"); + return -ETIMEDOUT; + } + if (irq_status.status & irq_mask.status) { + dev_err(cdns_ctrl->dev, "CDMA command failed\n"); + return -EIO; + } + + return 0; +} + +/* + * ECC size depends on configured ECC strength and on maximum supported + * ECC step size. + */ +static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength) +{ + int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8); + + return ALIGN(nbytes, 2); +} + +#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \ + static int \ + cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \ + int strength)\ + {\ + return cadence_nand_calc_ecc_bytes(max_step_size, strength);\ + } + +CADENCE_NAND_CALC_ECC_BYTES(256) +CADENCE_NAND_CALC_ECC_BYTES(512) +CADENCE_NAND_CALC_ECC_BYTES(1024) +CADENCE_NAND_CALC_ECC_BYTES(2048) +CADENCE_NAND_CALC_ECC_BYTES(4096) + +/* Function reads BCH capabilities. 
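cadence_nand_calc_ecc_bytes() above sizes the per-sector ECC area as fls(8 * step_size) parity bits per correctable error times the strength, rounded up to whole bytes and then to an even count. A worked standalone example; DIV_ROUND_UP(), ALIGN() and fls32() approximate the kernel helpers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

static int fls32(unsigned int v)        /* highest set bit, 1-based */
{
        return v ? 32 - __builtin_clz(v) : 0;
}

static int calc_ecc_bytes(int max_step_size, int strength)
{
        int nbytes = DIV_ROUND_UP(fls32(8 * max_step_size) * strength, 8);

        return ALIGN(nbytes, 2);
}

int main(void)
{
        /*
         * 512-byte steps: fls(4096) = 13 parity bits per correctable error.
         * Strength 8  -> 104 bits -> 13 bytes -> aligned up to 14.
         * Strength 24 -> 312 bits -> 39 bytes -> aligned up to 40.
         */
        printf("%d %d\n", calc_ecc_bytes(512, 8), calc_ecc_bytes(512, 24));
        return 0;
}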
*/ +static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps; + int max_step_size = 0, nstrengths, i; + u32 reg; + + reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0); + cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg); + cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg); + cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg); + cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg); + + reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1); + cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg); + cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg); + cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg); + cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg); + + reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2); + cdns_ctrl->ecc_stepinfos[0].stepsize = + FIELD_GET(BCH_CFG_2_SECT_0, reg); + + cdns_ctrl->ecc_stepinfos[1].stepsize = + FIELD_GET(BCH_CFG_2_SECT_1, reg); + + nstrengths = 0; + for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) { + if (cdns_ctrl->ecc_strengths[i] != 0) + nstrengths++; + } + + ecc_caps->nstepinfos = 0; + for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) { + /* ECC strengths are common for all step infos. */ + cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths; + cdns_ctrl->ecc_stepinfos[i].strengths = + cdns_ctrl->ecc_strengths; + + if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0) + ecc_caps->nstepinfos++; + + if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size) + max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize; + } + ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0]; + + switch (max_step_size) { + case 256: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256; + break; + case 512: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512; + break; + case 1024: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024; + break; + case 2048: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048; + break; + case 4096: + ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096; + break; + default: + dev_err(cdns_ctrl->dev, + "Unsupported sector size(ecc step size) %d\n", + max_step_size); + return -EIO; + } + + return 0; +} + +/* Hardware initialization. */ +static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl) +{ + int status; + u32 reg; + + status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_INIT_COMP, false); + if (status) + return status; + + reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION); + cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg); + + dev_info(cdns_ctrl->dev, + "%s: cadence nand controller version reg %x\n", + __func__, reg); + + /* Disable cache and multiplane. */ + writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG); + writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG); + + /* Clear all interrupts. */ + writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS); + + cadence_nand_get_caps(cdns_ctrl); + cadence_nand_read_bch_caps(cdns_ctrl); + + /* + * Set IO width access to 8. + * It is because during SW device discovering width access + * is expected to be 8. + */ + status = cadence_nand_set_access_width16(cdns_ctrl, false); + + return status; +} + +#define TT_MAIN_OOB_AREAS 2 +#define TT_RAW_PAGE 3 +#define TT_BBM 4 +#define TT_MAIN_OOB_AREA_EXT 5 + +/* Prepare size of data to transfer. 
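cadence_nand_hw_init() above, like most entry points in this driver, first gates on the CTRL_STATUS register through cadence_nand_wait_for_value(), a thin wrapper around readl_relaxed_poll_timeout(). A crude user-space model of that poll-with-timeout pattern (the 10us sleep between reads is elided):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_STATUS_INIT_COMP (1u << 9)

static int poll_timeout(volatile uint32_t *reg, uint32_t mask,
                        int want_set, int tries)
{
        while (tries--) {
                if (!!(*reg & mask) == !!want_set)
                        return 0;
                /* the kernel helper sleeps ~10us here between reads */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        uint32_t ctrl_status = CTRL_STATUS_INIT_COMP;   /* init complete */

        printf("%d\n", poll_timeout(&ctrl_status, CTRL_STATUS_INIT_COMP,
                                    1, 100));
        return 0;
}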
*/ +static void +cadence_nand_prepare_data_size(struct nand_chip *chip, + int transfer_type) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + u32 sec_size = 0, offset = 0, sec_cnt = 1; + u32 last_sec_size = cdns_chip->sector_size; + u32 data_ctrl_size = 0; + u32 reg = 0; + + if (cdns_ctrl->curr_trans_type == transfer_type) + return; + + switch (transfer_type) { + case TT_MAIN_OOB_AREA_EXT: + sec_cnt = cdns_chip->sector_count; + sec_size = cdns_chip->sector_size; + data_ctrl_size = cdns_chip->avail_oob_size; + break; + case TT_MAIN_OOB_AREAS: + sec_cnt = cdns_chip->sector_count; + last_sec_size = cdns_chip->sector_size + + cdns_chip->avail_oob_size; + sec_size = cdns_chip->sector_size; + break; + case TT_RAW_PAGE: + last_sec_size = mtd->writesize + mtd->oobsize; + break; + case TT_BBM: + offset = mtd->writesize + cdns_chip->bbm_offs; + last_sec_size = 8; + break; + } + + reg = 0; + reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset); + reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt); + writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0); + + reg = 0; + reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size); + reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size); + writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1); + + if (cdns_ctrl->caps2.data_control_supp) { + reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL); + reg &= ~CONTROL_DATA_CTRL_SIZE; + reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size); + writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL); + } + + cdns_ctrl->curr_trans_type = transfer_type; +} + +static int +cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, + int page, void *buf, void *ctrl_dat, u32 buf_size, + u32 ctrl_dat_size, enum dma_data_direction dir, + bool with_ecc) +{ + dma_addr_t dma_buf, dma_ctrl_dat = 0; + u8 thread_nr = chip_nr; + int status; + u16 ctype; + + if (dir == DMA_FROM_DEVICE) + ctype = CDMA_CT_RD; + else + ctype = CDMA_CT_WR; + + cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc); + + dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir); + if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) { + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); + return -EIO; + } + + if (ctrl_dat && ctrl_dat_size) { + dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat, + ctrl_dat_size, dir); + if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) { + dma_unmap_single(cdns_ctrl->dev, dma_buf, + buf_size, dir); + dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n"); + return -EIO; + } + } + + cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page, + dma_buf, dma_ctrl_dat, ctype); + + status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); + + dma_unmap_single(cdns_ctrl->dev, dma_buf, + buf_size, dir); + + if (ctrl_dat && ctrl_dat_size) + dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat, + ctrl_dat_size, dir); + if (status) + return status; + + return cadence_nand_cdma_finish(cdns_ctrl); +} + +static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl, + struct cadence_nand_timings *t) +{ + writel_relaxed(t->async_toggle_timings, + cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS); + writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0); + writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1); + writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2); + + if (cdns_ctrl->caps2.is_phy_type_dll) + writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL); + + writel_relaxed(t->phy_ctrl, 
cdns_ctrl->reg + PHY_CTRL); + + if (cdns_ctrl->caps2.is_phy_type_dll) { + writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL); + writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING); + writel_relaxed(t->phy_dqs_timing, + cdns_ctrl->reg + PHY_DQS_TIMING); + writel_relaxed(t->phy_gate_lpbk_ctrl, + cdns_ctrl->reg + PHY_GATE_LPBK_CTRL); + writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE, + cdns_ctrl->reg + PHY_DLL_MASTER_CTRL); + writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL); + } +} + +static int cadence_nand_select_target(struct nand_chip *chip) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + + if (chip == cdns_ctrl->selected_chip) + return 0; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings); + + cadence_nand_set_ecc_strength(cdns_ctrl, + cdns_chip->corr_str_idx); + + cadence_nand_set_erase_detection(cdns_ctrl, true, + chip->ecc.strength); + + cdns_ctrl->curr_trans_type = -1; + cdns_ctrl->selected_chip = chip; + + return 0; +} + +static int cadence_nand_erase(struct nand_chip *chip, u32 page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + int status; + u8 thread_nr = cdns_chip->cs[chip->cur_cs]; + + cadence_nand_cdma_desc_prepare(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, 0, 0, + CDMA_CT_ERASE); + status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); + if (status) { + dev_err(cdns_ctrl->dev, "erase operation failed\n"); + return -EIO; + } + + status = cadence_nand_cdma_finish(cdns_ctrl); + if (status) + return status; + + return 0; +} + +static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf) +{ + int status; + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + + cadence_nand_prepare_data_size(chip, TT_BBM); + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); + + /* + * Read only bad block marker from offset + * defined by a memory manufacturer. + */ + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->oobsize, + 0, DMA_FROM_DEVICE, false); + if (status) { + dev_err(cdns_ctrl->dev, "read BBM failed\n"); + return -EIO; + } + + memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len); + + return 0; +} + +static int cadence_nand_write_page(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int status; + u16 marker_val = 0xFFFF; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len, + mtd->writesize + + cdns_chip->bbm_offs, + 1); + + if (oob_required) { + marker_val = *(u16 *)(chip->oob_poi + + cdns_chip->bbm_offs); + } else { + /* Set oob data to 0xFF. 
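+		 * Erased NAND cells read back as all ones, so padding the
+		 * unused OOB bytes with 0xFF leaves them effectively
+		 * unprogrammed on the flash.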
*/ + memset(cdns_ctrl->buf + mtd->writesize, 0xFF, + cdns_chip->avail_oob_size); + } + + cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val); + + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT); + + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) && + cdns_ctrl->caps2.data_control_supp) { + u8 *oob; + + if (oob_required) + oob = chip->oob_poi; + else + oob = cdns_ctrl->buf + mtd->writesize; + + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, (void *)buf, oob, + mtd->writesize, + cdns_chip->avail_oob_size, + DMA_TO_DEVICE, true); + if (status) { + dev_err(cdns_ctrl->dev, "write page failed\n"); + return -EIO; + } + + return 0; + } + + if (oob_required) { + /* Transfer the data to the oob area. */ + memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi, + cdns_chip->avail_oob_size); + } + + memcpy(cdns_ctrl->buf, buf, mtd->writesize); + + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS); + + return cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->writesize + + cdns_chip->avail_oob_size, + 0, DMA_TO_DEVICE, true); +} + +static int cadence_nand_write_oob(struct nand_chip *chip, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + + memset(cdns_ctrl->buf, 0xFF, mtd->writesize); + + return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page); +} + +static int cadence_nand_write_page_raw(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *tmp_buf = cdns_ctrl->buf; + int oob_skip = cdns_chip->bbm_len; + size_t size = writesize + oobsize; + int i, pos, len; + int status = 0; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + /* + * Fill the buffer with 0xff first except the full page transfer. + * This simplifies the logic. + */ + if (!buf || !oob_required) + memset(tmp_buf, 0xff, size); + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); + + /* Arrange the buffer for syndrome payload/ecc layout. */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(tmp_buf + pos, buf, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(tmp_buf + writesize + oob_skip, buf, + len); + buf += len; + } + } + } + + if (oob_required) { + const u8 *oob = chip->oob_poi; + u32 oob_data_offset = (cdns_chip->sector_count - 1) * + (cdns_chip->sector_size + chip->ecc.bytes) + + cdns_chip->sector_size + oob_skip; + + /* BBM at the beginning of the OOB area. */ + memcpy(tmp_buf + writesize, oob, oob_skip); + + /* OOB free. */ + memcpy(tmp_buf + oob_data_offset, oob, + cdns_chip->avail_oob_size); + oob += cdns_chip->avail_oob_size; + + /* OOB ECC. 
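+		 * In the syndrome layout each ECC chunk directly follows its
+		 * data chunk. Editor's example with hypothetical
+		 * 512 B / 14 B steps: step i places its ECC bytes at
+		 * pos = 512 + i * (512 + 14) in the raw page image, which is
+		 * exactly what the loop below computes.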
*/ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + if (i == (ecc_steps - 1)) + pos += cdns_chip->avail_oob_size; + + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(tmp_buf + pos, oob, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(tmp_buf + writesize + oob_skip, oob, + len); + oob += len; + } + } + } + + cadence_nand_prepare_data_size(chip, TT_RAW_PAGE); + + return cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->writesize + + mtd->oobsize, + 0, DMA_TO_DEVICE, false); +} + +static int cadence_nand_write_oob_raw(struct nand_chip *chip, + int page) +{ + return cadence_nand_write_page_raw(chip, NULL, true, page); +} + +static int cadence_nand_read_page(struct nand_chip *chip, + u8 *buf, int oob_required, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int status = 0; + int ecc_err_count = 0; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len, + mtd->writesize + + cdns_chip->bbm_offs, 1); + + /* + * If data buffer can be accessed by DMA and data_control feature + * is supported then transfer data and oob directly. + */ + if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) && + cdns_ctrl->caps2.data_control_supp) { + u8 *oob; + + if (oob_required) + oob = chip->oob_poi; + else + oob = cdns_ctrl->buf + mtd->writesize; + + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT); + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, buf, oob, + mtd->writesize, + cdns_chip->avail_oob_size, + DMA_FROM_DEVICE, true); + /* Otherwise use bounce buffer. */ + } else { + cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS); + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, + NULL, mtd->writesize + + cdns_chip->avail_oob_size, + 0, DMA_FROM_DEVICE, true); + + memcpy(buf, cdns_ctrl->buf, mtd->writesize); + if (oob_required) + memcpy(chip->oob_poi, + cdns_ctrl->buf + mtd->writesize, + mtd->oobsize); + } + + switch (status) { + case STAT_ECC_UNCORR: + mtd->ecc_stats.failed++; + ecc_err_count++; + break; + case STAT_ECC_CORR: + ecc_err_count = FIELD_GET(CDMA_CS_MAXERR, + cdns_ctrl->cdma_desc->status); + mtd->ecc_stats.corrected += ecc_err_count; + break; + case STAT_ERASED: + case STAT_OK: + break; + default: + dev_err(cdns_ctrl->dev, "read page failed\n"); + return -EIO; + } + + if (oob_required) + if (cadence_nand_read_bbm(chip, page, chip->oob_poi)) + return -EIO; + + return ecc_err_count; +} + +/* Reads OOB data from the device. 
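+ * There is no OOB-only transfer here: the whole page is read into the
+ * controller bounce buffer and only chip->oob_poi is consumed.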
*/ +static int cadence_nand_read_oob(struct nand_chip *chip, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + + return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page); +} + +static int cadence_nand_read_page_raw(struct nand_chip *chip, + u8 *buf, int oob_required, int page) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int oob_skip = cdns_chip->bbm_len; + int writesize = mtd->writesize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *tmp_buf = cdns_ctrl->buf; + int i, pos, len; + int status = 0; + + status = cadence_nand_select_target(chip); + if (status) + return status; + + cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0); + + cadence_nand_prepare_data_size(chip, TT_RAW_PAGE); + status = cadence_nand_cdma_transfer(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + page, cdns_ctrl->buf, NULL, + mtd->writesize + + mtd->oobsize, + 0, DMA_FROM_DEVICE, false); + + switch (status) { + case STAT_ERASED: + case STAT_OK: + break; + default: + dev_err(cdns_ctrl->dev, "read raw page failed\n"); + return -EIO; + } + + /* Arrange the buffer for syndrome payload/ecc layout. */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(buf, tmp_buf + pos, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(buf, tmp_buf + writesize + oob_skip, + len); + buf += len; + } + } + } + + if (oob_required) { + u8 *oob = chip->oob_poi; + u32 oob_data_offset = (cdns_chip->sector_count - 1) * + (cdns_chip->sector_size + chip->ecc.bytes) + + cdns_chip->sector_size + oob_skip; + + /* OOB free. */ + memcpy(oob, tmp_buf + oob_data_offset, + cdns_chip->avail_oob_size); + + /* BBM at the beginning of the OOB area. 
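+		 * Note the copy order: the free OOB bytes were copied first,
+		 * and the memcpy() below then overwrites the first bbm_len
+		 * bytes of oob_poi with the bad block marker taken from the
+		 * start of the spare area.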
+		 */
+		memcpy(oob, tmp_buf + writesize, oob_skip);
+
+		oob += cdns_chip->avail_oob_size;
+
+		/* OOB ECC. */
+		for (i = 0; i < ecc_steps; i++) {
+			pos = ecc_size + i * (ecc_size + ecc_bytes);
+			len = ecc_bytes;
+
+			if (i == (ecc_steps - 1))
+				pos += cdns_chip->avail_oob_size;
+
+			if (pos >= writesize)
+				pos += oob_skip;
+			else if (pos + len > writesize)
+				len = writesize - pos;
+
+			memcpy(oob, tmp_buf + pos, len);
+			oob += len;
+			if (len < ecc_bytes) {
+				len = ecc_bytes - len;
+				memcpy(oob, tmp_buf + writesize + oob_skip,
+				       len);
+				oob += len;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int cadence_nand_read_oob_raw(struct nand_chip *chip,
+				     int page)
+{
+	return cadence_nand_read_page_raw(chip, NULL, true, page);
+}
+
+static void cadence_nand_slave_dma_transfer_finished(void *data)
+{
+	struct completion *finished = data;
+
+	complete(finished);
+}
+
+static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+					   void *buf,
+					   dma_addr_t dev_dma, size_t len,
+					   enum dma_data_direction dir)
+{
+	DECLARE_COMPLETION_ONSTACK(finished);
+	struct dma_chan *chan;
+	struct dma_device *dma_dev;
+	dma_addr_t src_dma, dst_dma, buf_dma;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie;
+
+	chan = cdns_ctrl->dmac;
+	dma_dev = chan->device;
+
+	buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
+	if (dma_mapping_error(dma_dev->dev, buf_dma)) {
+		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
+		goto err;
+	}
+
+	if (dir == DMA_FROM_DEVICE) {
+		src_dma = cdns_ctrl->io.dma;
+		dst_dma = buf_dma;
+	} else {
+		src_dma = buf_dma;
+		dst_dma = cdns_ctrl->io.dma;
+	}
+
+	tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
+		goto err_unmap;
+	}
+
+	tx->callback = cadence_nand_slave_dma_transfer_finished;
+	tx->callback_param = &finished;
+
+	cookie = dmaengine_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(cdns_ctrl->dmac);
+	wait_for_completion(&finished);
+
+	/* Unmap with the same device that mapped the buffer. */
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+	return 0;
+
+err_unmap:
+	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+
+err:
+	dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+
+	return -EIO;
+}
+
+static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
+				 u8 *buf, int len)
+{
+	u8 thread_nr = 0;
+	u32 sdma_size;
+	int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+	if (status)
+		return status;
+
+	if (!cdns_ctrl->caps1->has_dma) {
+		int len_in_words = len >> 2;
+
+		/* Read the 32-bit aligned part of the data. */
+		ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+		if (sdma_size > len) {
+			/* Read the remaining data from the slave DMA interface, if any. */
+			ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+				     sdma_size / 4 - len_in_words);
+			/* Copy the remaining bytes. */
+			memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
+			       len - (len_in_words << 2));
+		}
+		return 0;
+	}
+
+	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+		status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
+							 cdns_ctrl->io.dma,
+							 len, DMA_FROM_DEVICE);
+		if (status == 0)
+			return 0;
+
+		dev_warn(cdns_ctrl->dev,
+			 "Slave DMA transfer failed. Retrying using the bounce buffer.");
+	}
+
+	/* If the DMA transfer is not possible or has failed, use the bounce buffer.
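+	 * The slave DMA interface must be drained of exactly sdma_size
+	 * bytes, so the full transfer lands in cdns_ctrl->buf and only the
+	 * requested len bytes are copied out afterwards.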
+	 */
+	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+						 cdns_ctrl->io.dma,
+						 sdma_size, DMA_FROM_DEVICE);
+
+	if (status) {
+		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+		return status;
+	}
+
+	memcpy(buf, cdns_ctrl->buf, len);
+
+	return 0;
+}
+
+static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
+				  const u8 *buf, int len)
+{
+	u8 thread_nr = 0;
+	u32 sdma_size;
+	int status;
+
+	/* Wait until the slave DMA interface is ready for data transfer. */
+	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
+	if (status)
+		return status;
+
+	if (!cdns_ctrl->caps1->has_dma) {
+		int len_in_words = len >> 2;
+
+		iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
+		if (sdma_size > len) {
+			/* Copy the remaining bytes. */
+			memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
+			       len - (len_in_words << 2));
+			/* Write all of the data the NAND controller expects. */
+			iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
+				      sdma_size / 4 - len_in_words);
+		}
+
+		return 0;
+	}
+
+	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
+		status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
+							 cdns_ctrl->io.dma,
+							 len, DMA_TO_DEVICE);
+		if (status == 0)
+			return 0;
+
+		dev_warn(cdns_ctrl->dev,
+			 "Slave DMA transfer failed. Retrying using the bounce buffer.");
+	}
+
+	/* If the DMA transfer is not possible or has failed, use the bounce buffer. */
+	memcpy(cdns_ctrl->buf, buf, len);
+
+	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
+						 cdns_ctrl->io.dma,
+						 sdma_size, DMA_TO_DEVICE);
+
+	if (status)
+		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
+
+	return status;
+}
+
+static int cadence_nand_force_byte_access(struct nand_chip *chip,
+					  bool force_8bit)
+{
+	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
+	int status;
+
+	/*
+	 * Callers of this function do not verify if the NAND is using a
+	 * 16-bit or an 8-bit bus for normal operations, so we need to take
+	 * care of that here by leaving the configuration unchanged if the
+	 * NAND does not have the NAND_BUSWIDTH_16 flag set.
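+	 *
+	 * Editor's sketch of the intended (hypothetical) usage pattern
+	 * around an 8-bit-only data phase:
+	 *
+	 *	cadence_nand_force_byte_access(chip, true);
+	 *	(issue the 8-bit data cycles)
+	 *	cadence_nand_force_byte_access(chip, false);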
+ */ + if (!(chip->options & NAND_BUSWIDTH_16)) + return 0; + + status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit); + + return status; +} + +static int cadence_nand_cmd_opcode(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr; + unsigned int op_id = 0; + u64 mini_ctrl_cmd = 0; + int ret; + + instr = &subop->instrs[op_id]; + + if (instr->delay_ns > 0) + mini_ctrl_cmd |= GCMD_LAY_TWB; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, + GCMD_LAY_INSTR_CMD); + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD, + instr->ctx.cmd.opcode); + + ret = cadence_nand_generic_cmd_send(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + mini_ctrl_cmd); + if (ret) + dev_err(cdns_ctrl->dev, "send cmd %x failed\n", + instr->ctx.cmd.opcode); + + return ret; +} + +static int cadence_nand_cmd_address(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr; + unsigned int op_id = 0; + u64 mini_ctrl_cmd = 0; + unsigned int offset, naddrs; + u64 address = 0; + const u8 *addrs; + int ret; + int i; + + instr = &subop->instrs[op_id]; + + if (instr->delay_ns > 0) + mini_ctrl_cmd |= GCMD_LAY_TWB; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, + GCMD_LAY_INSTR_ADDR); + + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + + for (i = 0; i < naddrs; i++) + address |= (u64)addrs[i] << (8 * i); + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR, + address); + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE, + naddrs - 1); + + ret = cadence_nand_generic_cmd_send(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + mini_ctrl_cmd); + if (ret) + dev_err(cdns_ctrl->dev, "send address %llx failed\n", address); + + return ret; +} + +static int cadence_nand_cmd_erase(struct nand_chip *chip, + const struct nand_subop *subop) +{ + unsigned int op_id; + + if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) { + int i; + const struct nand_op_instr *instr = NULL; + unsigned int offset, naddrs; + const u8 *addrs; + u32 page = 0; + + instr = &subop->instrs[1]; + offset = nand_subop_get_addr_start_off(subop, 1); + naddrs = nand_subop_get_num_addr_cyc(subop, 1); + addrs = &instr->ctx.addr.addrs[offset]; + + for (i = 0; i < naddrs; i++) + page |= (u32)addrs[i] << (8 * i); + + return cadence_nand_erase(chip, page); + } + + /* + * If it is not an erase operation then handle operation + * by calling exec_op function. 
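+	 * (This path is taken when the parser matched the erase pattern but
+	 * the first opcode turned out not to be NAND_CMD_ERASE1; each
+	 * remaining instruction is then replayed as a single-instruction
+	 * nand_operation.)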
+ */ + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + int ret; + const struct nand_operation nand_op = { + .cs = chip->cur_cs, + .instrs = &subop->instrs[op_id], + .ninstrs = 1}; + ret = chip->controller->ops->exec_op(chip, &nand_op, false); + if (ret) + return ret; + } + + return 0; +} + +static int cadence_nand_cmd_data(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr; + unsigned int offset, op_id = 0; + u64 mini_ctrl_cmd = 0; + int len = 0; + int ret; + + instr = &subop->instrs[op_id]; + + if (instr->delay_ns > 0) + mini_ctrl_cmd |= GCMD_LAY_TWB; + + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, + GCMD_LAY_INSTR_DATA); + + if (instr->type == NAND_OP_DATA_OUT_INSTR) + mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR, + GCMD_DIR_WRITE); + + len = nand_subop_get_data_len(subop, op_id); + offset = nand_subop_get_data_start_off(subop, op_id); + mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1); + mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len); + if (instr->ctx.data.force_8bit) { + ret = cadence_nand_force_byte_access(chip, true); + if (ret) { + dev_err(cdns_ctrl->dev, + "cannot change byte access generic data cmd failed\n"); + return ret; + } + } + + ret = cadence_nand_generic_cmd_send(cdns_ctrl, + cdns_chip->cs[chip->cur_cs], + mini_ctrl_cmd); + if (ret) { + dev_err(cdns_ctrl->dev, "send generic data cmd failed\n"); + return ret; + } + + if (instr->type == NAND_OP_DATA_IN_INSTR) { + void *buf = instr->ctx.data.buf.in + offset; + + ret = cadence_nand_read_buf(cdns_ctrl, buf, len); + } else { + const void *buf = instr->ctx.data.buf.out + offset; + + ret = cadence_nand_write_buf(cdns_ctrl, buf, len); + } + + if (ret) { + dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n"); + return ret; + } + + if (instr->ctx.data.force_8bit) { + ret = cadence_nand_force_byte_access(chip, false); + if (ret) { + dev_err(cdns_ctrl->dev, + "cannot change byte access generic data cmd failed\n"); + } + } + + return ret; +} + +static int cadence_nand_cmd_waitrdy(struct nand_chip *chip, + const struct nand_subop *subop) +{ + int status; + unsigned int op_id = 0; + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + const struct nand_op_instr *instr = &subop->instrs[op_id]; + u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000; + + status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS, + timeout_us, + BIT(cdns_chip->cs[chip->cur_cs]), + false); + return status; +} + +static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_erase, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_opcode, + NAND_OP_PARSER_PAT_CMD_ELEM(false)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_address, + NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_data, + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_data, + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)), + NAND_OP_PARSER_PATTERN( + cadence_nand_cmd_waitrdy, + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)) + ); + +static int cadence_nand_exec_op(struct nand_chip 
*chip, + const struct nand_operation *op, + bool check_only) +{ + int status = cadence_nand_select_target(chip); + + if (status) + return status; + + return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op, + check_only); +} + +static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + + if (section) + return -ERANGE; + + oobregion->offset = cdns_chip->bbm_len; + oobregion->length = cdns_chip->avail_oob_size + - cdns_chip->bbm_len; + + return 0; +} + +static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + + if (section) + return -ERANGE; + + oobregion->offset = cdns_chip->avail_oob_size; + oobregion->length = chip->ecc.total; + + return 0; +} + +static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = { + .free = cadence_nand_ooblayout_free, + .ecc = cadence_nand_ooblayout_ecc, +}; + +static int calc_cycl(u32 timing, u32 clock) +{ + if (timing == 0 || clock == 0) + return 0; + + if ((timing % clock) > 0) + return timing / clock; + else + return timing / clock - 1; +} + +/* Calculate max data valid window. */ +static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min, + u32 board_delay_skew_min, u32 ext_mode) +{ + if (ext_mode == 0) + clk_period /= 2; + + return (trp_cnt + 1) * clk_period + trhoh_min + + board_delay_skew_min; +} + +/* Calculate data valid window. */ +static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, + u32 trea_max, u32 ext_mode) +{ + if (ext_mode == 0) + clk_period /= 2; + + return (trp_cnt + 1) * clk_period + trhoh_min - trea_max; +} + +static int +cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) +{ + const struct nand_sdr_timings *sdr; + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + struct cadence_nand_timings *t = &cdns_chip->timings; + u32 reg; + u32 board_delay = cdns_ctrl->board_delay; + u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL, + cdns_ctrl->nf_clk_rate); + u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt; + u32 tfeat_cnt, trhz_cnt, tvdly_cnt; + u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt; + u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0; + u32 if_skew = cdns_ctrl->caps1->if_skew; + u32 board_delay_skew_min = board_delay - if_skew; + u32 board_delay_skew_max = board_delay + if_skew; + u32 dqs_sampl_res, phony_dqs_mod; + u32 tdvw, tdvw_min, tdvw_max; + u32 ext_rd_mode, ext_wr_mode; + u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0; + u32 sampling_point; + + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + memset(t, 0, sizeof(*t)); + /* Sampling point calculation. */ + + if (cdns_ctrl->caps2.is_phy_type_dll) + phony_dqs_mod = 2; + else + phony_dqs_mod = 1; + + dqs_sampl_res = clk_period / phony_dqs_mod; + + tdvw_min = sdr->tREA_max + board_delay_skew_max; + /* + * The idea of those calculation is to get the optimum value + * for tRP and tRH timings. If it is NOT possible to sample data + * with optimal tRP/tRH settings, the parameters will be extended. + * If clk_period is 50ns (the lowest value) this condition is met + * for asynchronous timing modes 1, 2, 3, 4 and 5. 
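+	 * (Editor's worked example, using ONFI SDR mode 5 figures from
+	 * memory: tRC_min = 20 ns, tRP_min = 10 ns, tREH_min = 7 ns, so
+	 * with clk_period = 50 ns both tRP_min and tREH_min fit within
+	 * clk_period / 2 and tRC_min <= clk_period holds.)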
+	 * If clk_period is 20 ns the condition is met only
+	 * for asynchronous timing mode 5.
+	 */
+	if (sdr->tRC_min <= clk_period &&
+	    sdr->tRP_min <= (clk_period / 2) &&
+	    sdr->tREH_min <= (clk_period / 2)) {
+		/* Performance mode. */
+		ext_rd_mode = 0;
+		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+				 sdr->tREA_max, ext_rd_mode);
+		tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
+					 board_delay_skew_min,
+					 ext_rd_mode);
+		/*
+		 * Check if a data valid window and sampling point can be
+		 * found and that the point is not on the edge (i.e. we have
+		 * hold margin). If not, extend the tRP timing.
+		 */
+		if (tdvw > 0) {
+			if (tdvw_max <= tdvw_min ||
+			    (tdvw_max % dqs_sampl_res) == 0) {
+				/*
+				 * No valid sampling point, so the RE pulse
+				 * needs to be widened by half a clock cycle.
+				 */
+				ext_rd_mode = 1;
+			}
+		} else {
+			/*
+			 * There is no valid window in which to sample the
+			 * data, so tRP needs to be widened. Very conservative
+			 * calculations are performed here.
+			 */
+			trp_cnt = (sdr->tREA_max + board_delay_skew_max +
+				   dqs_sampl_res) / clk_period;
+			ext_rd_mode = 1;
+		}
+
+	} else {
+		/* Extended read mode. */
+		u32 trh;
+
+		ext_rd_mode = 1;
+		trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
+		trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
+		if (sdr->tREH_min >= trh)
+			trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
+		else
+			trh_cnt = calc_cycl(trh, clk_period);
+
+		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
+				 sdr->tREA_max, ext_rd_mode);
+		/*
+		 * Check if a data valid window and sampling point can be
+		 * found; if the point is at the edge, check whether the
+		 * previous one is valid. If not, extend the tRP timing.
+		 */
+		if (tdvw > 0) {
+			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+						 sdr->tRHOH_min,
+						 board_delay_skew_min,
+						 ext_rd_mode);
+
+			if ((((tdvw_max / dqs_sampl_res)
+			      * dqs_sampl_res) <= tdvw_min) ||
+			    (((tdvw_max % dqs_sampl_res) == 0) &&
+			     (((tdvw_max / dqs_sampl_res - 1)
+			       * dqs_sampl_res) <= tdvw_min))) {
+				/*
+				 * The data valid window is narrower than the
+				 * sampling resolution and does not hit any
+				 * sampling point. To make sure a sampling
+				 * point is found, the RE low pulse width is
+				 * extended by one clock cycle.
+				 */
+				trp_cnt = trp_cnt + 1;
+			}
+		} else {
+			/*
+			 * There is no valid window in which to sample the
+			 * data, so tRP needs to be widened. Very conservative
+			 * calculations are performed here.
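+			 * Editor's example with hypothetical numbers:
+			 * tREA_max = 20 ns, board_delay_skew_max = 5 ns,
+			 * dqs_sampl_res = 10 ns and clk_period = 10 ns give
+			 * trp_cnt = (20 + 5 + 10) / 10 = 3.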
+			 */
+			trp_cnt = (sdr->tREA_max + board_delay_skew_max +
+				   dqs_sampl_res) / clk_period;
+		}
+	}
+
+	tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
+				 sdr->tRHOH_min,
+				 board_delay_skew_min, ext_rd_mode);
+
+	if (sdr->tWC_min <= clk_period &&
+	    (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
+	    (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
+		ext_wr_mode = 0;
+	} else {
+		u32 twh;
+
+		ext_wr_mode = 1;
+		twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
+		if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
+			twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
+					    clk_period);
+
+		twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
+		if (sdr->tWH_min >= twh)
+			twh = sdr->tWH_min;
+
+		twh_cnt = calc_cycl(twh + if_skew, clk_period);
+	}
+
+	reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
+	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
+	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
+	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
+	t->async_toggle_timings = reg;
+	dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
+
+	tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
+	tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
+	twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
+	trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
+	reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
+
+	/*
+	 * If a timing exceeds the delay field of the timing register,
+	 * use the maximum value.
+	 */
+	if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
+		reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
+	else
+		reg |= TIMINGS0_TCCS;
+
+	reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
+	reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
+	t->timings0 = reg;
+	dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
+
+	/* The following is related to a single signal, so no skew is needed. */
+	trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
+	trhz_cnt = trhz_cnt + 1;
+	twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
+	/*
+	 * Because of the two-stage syncflop the value must be increased:
+	 * the first addend accounts for the synchronization itself, the
+	 * second for the output interface delay.
+	 */
+	twb_cnt = twb_cnt + 3 + 5;
+	/*
+	 * The following is related to the WE edge of the random data input
+	 * sequence, so no skew is needed.
+	 */
+	tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
+	reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
+	reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
+	reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
+	t->timings1 = reg;
+	dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
+
+	tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
+	if (tfeat_cnt < twb_cnt)
+		tfeat_cnt = twb_cnt;
+
+	tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
+	tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
+
+	reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
+	reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
+	reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
+	t->timings2 = reg;
+	dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
+
+	if (cdns_ctrl->caps2.is_phy_type_dll) {
+		reg = DLL_PHY_CTRL_DLL_RST_N;
+		if (ext_wr_mode)
+			reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
+		if (ext_rd_mode)
+			reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
+
+		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
+		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
+		t->dll_phy_ctrl = reg;
+		dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
+	}
+
+	/* Sampling point calculation.
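+	 * Editor's example with hypothetical numbers: tdvw_max = 45 ns and
+	 * dqs_sampl_res = 10 ns give sampling_point = 45 / 10 = 4, and the
+	 * hold-margin check below then compares 4 * 10 ns against tdvw_min.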
*/ + if ((tdvw_max % dqs_sampl_res) > 0) + sampling_point = tdvw_max / dqs_sampl_res; + else + sampling_point = (tdvw_max / dqs_sampl_res - 1); + + if (sampling_point * dqs_sampl_res > tdvw_min) { + dll_phy_dqs_timing = + FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4); + dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS; + phony_dqs_timing = sampling_point / phony_dqs_mod; + + if ((sampling_point % 2) > 0) { + dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL; + if ((tdvw_max % dqs_sampl_res) == 0) + /* + * Calculation for sampling point at the edge + * of data and being odd number. + */ + phony_dqs_timing = (tdvw_max / dqs_sampl_res) + / phony_dqs_mod - 1; + + if (!cdns_ctrl->caps2.is_phy_type_dll) + phony_dqs_timing--; + + } else { + phony_dqs_timing--; + } + rd_del_sel = phony_dqs_timing + 3; + } else { + dev_warn(cdns_ctrl->dev, + "ERROR : cannot find valid sampling point\n"); + } + + reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing); + if (cdns_ctrl->caps2.is_phy_type_dll) + reg |= PHY_CTRL_SDR_DQS; + t->phy_ctrl = reg; + dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg); + + if (cdns_ctrl->caps2.is_phy_type_dll) { + dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0); + dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2); + dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n", + dll_phy_dqs_timing); + t->phy_dqs_timing = dll_phy_dqs_timing; + + reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel); + dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n", + reg); + t->phy_gate_lpbk_ctrl = reg; + + dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n", + PHY_DLL_MASTER_CTRL_BYPASS_MODE); + dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0); + } + + return 0; +} + +int cadence_nand_attach_chip(struct nand_chip *chip) +{ + struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); + struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); + u32 ecc_size = cdns_chip->sector_count * chip->ecc.bytes; + struct mtd_info *mtd = nand_to_mtd(chip); + u32 max_oob_data_size; + int ret; + + if (chip->options & NAND_BUSWIDTH_16) { + ret = cadence_nand_set_access_width16(cdns_ctrl, true); + if (ret) + return ret; + } + + chip->bbt_options |= NAND_BBT_USE_FLASH; + chip->bbt_options |= NAND_BBT_NO_OOB; + chip->ecc.mode = NAND_ECC_HW; + + chip->options |= NAND_NO_SUBPAGE_WRITE; + + cdns_chip->bbm_offs = chip->badblockpos; + if (chip->options & NAND_BUSWIDTH_16) { + cdns_chip->bbm_offs &= ~0x01; + cdns_chip->bbm_len = 2; + } else { + cdns_chip->bbm_len = 1; + } + + ret = nand_ecc_choose_conf(chip, + &cdns_ctrl->ecc_caps, + mtd->oobsize - cdns_chip->bbm_len); + if (ret) { + dev_err(cdns_ctrl->dev, "ECC configuration failed\n"); + return ret; + } + + dev_dbg(cdns_ctrl->dev, + "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", + chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); + + /* Error correction configuration. 
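+	 * Editor's example (hypothetical chip): writesize = 4096,
+	 * oobsize = 224, ecc.size = 512 and ecc.bytes = 14 are intended to
+	 * yield sector_count = 8, ecc_size = 8 * 14 = 112 and up to
+	 * 224 - 112 = 112 bytes of free OOB.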
*/ + cdns_chip->sector_size = chip->ecc.size; + cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size; + + cdns_chip->avail_oob_size = mtd->oobsize - ecc_size; + + max_oob_data_size = MAX_OOB_SIZE_PER_SECTOR; + + if (cdns_chip->avail_oob_size > max_oob_data_size) + cdns_chip->avail_oob_size = max_oob_data_size; + + if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size) + > mtd->oobsize) + cdns_chip->avail_oob_size -= 4; + + ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength); + if (ret < 0) + return -EINVAL; + + cdns_chip->corr_str_idx = (u8)ret; + + if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS, + 1000000, + CTRL_STATUS_CTRL_BUSY, true)) + return -ETIMEDOUT; + + cadence_nand_set_ecc_strength(cdns_ctrl, + cdns_chip->corr_str_idx); + + cadence_nand_set_erase_detection(cdns_ctrl, true, + chip->ecc.strength); + + /* Override the default read operations. */ + chip->ecc.read_page = cadence_nand_read_page; + chip->ecc.read_page_raw = cadence_nand_read_page_raw; + chip->ecc.write_page = cadence_nand_write_page; + chip->ecc.write_page_raw = cadence_nand_write_page_raw; + chip->ecc.read_oob = cadence_nand_read_oob; + chip->ecc.write_oob = cadence_nand_write_oob; + chip->ecc.read_oob_raw = cadence_nand_read_oob_raw; + chip->ecc.write_oob_raw = cadence_nand_write_oob_raw; + + if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size) + cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize; + + /* Is 32-bit DMA supported? */ + ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(cdns_ctrl->dev, "no usable DMA configuration\n"); + return ret; + } + + mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops); + + return 0; +} + +static const struct nand_controller_ops cadence_nand_controller_ops = { + .attach_chip = cadence_nand_attach_chip, + .exec_op = cadence_nand_exec_op, + .setup_data_interface = cadence_nand_setup_data_interface, +}; + +static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, + struct device_node *np) +{ + struct cdns_nand_chip *cdns_chip; + struct mtd_info *mtd; + struct nand_chip *chip; + int nsels, ret, i; + u32 cs; + + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); + if (nsels <= 0) { + dev_err(cdns_ctrl->dev, "missing/invalid reg property\n"); + return -EINVAL; + } + + /* Allocate the nand chip structure. */ + cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) + + (nsels * sizeof(u8)), + GFP_KERNEL); + if (!cdns_chip) { + dev_err(cdns_ctrl->dev, "could not allocate chip structure\n"); + return -ENOMEM; + } + + cdns_chip->nsels = nsels; + + for (i = 0; i < nsels; i++) { + /* Retrieve CS id. */ + ret = of_property_read_u32_index(np, "reg", i, &cs); + if (ret) { + dev_err(cdns_ctrl->dev, + "could not retrieve reg property: %d\n", + ret); + return ret; + } + + if (cs >= cdns_ctrl->caps2.max_banks) { + dev_err(cdns_ctrl->dev, + "invalid reg value: %u (max CS = %d)\n", + cs, cdns_ctrl->caps2.max_banks); + return -EINVAL; + } + + if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) { + dev_err(cdns_ctrl->dev, + "CS %d already assigned\n", cs); + return -EINVAL; + } + + cdns_chip->cs[i] = cs; + } + + chip = &cdns_chip->chip; + chip->controller = &cdns_ctrl->controller; + nand_set_flash_node(chip, np); + + mtd = nand_to_mtd(chip); + mtd->dev.parent = cdns_ctrl->dev; + + /* + * Default to HW ECC engine mode. If the nand-ecc-mode property is given + * in the DT node, this entry will be overwritten in nand_scan_ident(). 
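+	 * (I.e. a chip subnode may carry an optional nand-ecc-mode property;
+	 * when it is absent, the NAND_ECC_HW default set below is kept.)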
+ */ + chip->ecc.mode = NAND_ECC_HW; + + ret = nand_scan(chip, cdns_chip->nsels); + if (ret) { + dev_err(cdns_ctrl->dev, "could not scan the nand chip\n"); + return ret; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(cdns_ctrl->dev, + "failed to register mtd device: %d\n", ret); + nand_cleanup(chip); + return ret; + } + + list_add_tail(&cdns_chip->node, &cdns_ctrl->chips); + + return 0; +} + +static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct cdns_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) { + nand_release(&entry->chip); + list_del(&entry->node); + } +} + +static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl) +{ + struct device_node *np = cdns_ctrl->dev->of_node; + struct device_node *nand_np; + int max_cs = cdns_ctrl->caps2.max_banks; + int nchips, ret; + + nchips = of_get_child_count(np); + + if (nchips > max_cs) { + dev_err(cdns_ctrl->dev, + "too many NAND chips: %d (max = %d CS)\n", + nchips, max_cs); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = cadence_nand_chip_init(cdns_ctrl, nand_np); + if (ret) { + of_node_put(nand_np); + cadence_nand_chips_cleanup(cdns_ctrl); + return ret; + } + } + + return 0; +} + +static void +cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) +{ + /* Disable interrupts. */ + writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE); +} + +static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) +{ + dma_cap_mask_t mask; + int ret; + + cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, + sizeof(*cdns_ctrl->cdma_desc), + &cdns_ctrl->dma_cdma_desc, + GFP_KERNEL); + if (!cdns_ctrl->dma_cdma_desc) + return -ENOMEM; + + cdns_ctrl->buf_size = SZ_16K; + cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL); + if (!cdns_ctrl->buf) { + ret = -ENOMEM; + goto free_buf_desc; + } + + if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr, + IRQF_SHARED, "cadence-nand-controller", + cdns_ctrl)) { + dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n"); + ret = -ENODEV; + goto free_buf; + } + + spin_lock_init(&cdns_ctrl->irq_lock); + init_completion(&cdns_ctrl->complete); + + ret = cadence_nand_hw_init(cdns_ctrl); + if (ret) + goto disable_irq; + + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + + if (cdns_ctrl->caps1->has_dma) { + cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL); + if (!cdns_ctrl->dmac) { + dev_err(cdns_ctrl->dev, + "Unable to get a DMA channel\n"); + ret = -EBUSY; + goto disable_irq; + } + } + + nand_controller_init(&cdns_ctrl->controller); + INIT_LIST_HEAD(&cdns_ctrl->chips); + + cdns_ctrl->controller.ops = &cadence_nand_controller_ops; + cdns_ctrl->curr_corr_str_idx = 0xFF; + + ret = cadence_nand_chips_init(cdns_ctrl); + if (ret) { + dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n", + ret); + goto dma_release_chnl; + } + + kfree(cdns_ctrl->buf); + cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL); + if (!cdns_ctrl->buf) { + ret = -ENOMEM; + goto dma_release_chnl; + } + + return 0; + +dma_release_chnl: + if (cdns_ctrl->dmac) + dma_release_channel(cdns_ctrl->dmac); + +disable_irq: + cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); + +free_buf: + kfree(cdns_ctrl->buf); + +free_buf_desc: + dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), + cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc); + + return ret; +} + +/* Driver exit point. 
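+ * Teardown mirrors cadence_nand_init(): chips first, then the IRQ, the
+ * bounce buffer, the CDMA descriptor and finally the DMA channel.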
*/ +static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) +{ + cadence_nand_chips_cleanup(cdns_ctrl); + cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); + kfree(cdns_ctrl->buf); + dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), + cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc); + + if (cdns_ctrl->dmac) + dma_release_channel(cdns_ctrl->dmac); +} + +struct cadence_nand_dt { + struct cdns_nand_ctrl cdns_ctrl; + struct clk *clk; +}; + +static const struct cadence_nand_dt_devdata cadence_nand_default = { + .if_skew = 0, + .has_dma = 1, +}; + +static const struct of_device_id cadence_nand_dt_ids[] = { + { + .compatible = "cdns,hp-nfc", + .data = &cadence_nand_default + }, {} +}; + +MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids); + +static int cadence_nand_dt_probe(struct platform_device *ofdev) +{ + struct resource *res; + struct cadence_nand_dt *dt; + struct cdns_nand_ctrl *cdns_ctrl; + int ret; + const struct of_device_id *of_id; + const struct cadence_nand_dt_devdata *devdata; + u32 val; + + of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev); + if (of_id) { + ofdev->id_entry = of_id->data; + devdata = of_id->data; + } else { + pr_err("Failed to find the right device id.\n"); + return -ENOMEM; + } + + dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL); + if (!dt) + return -ENOMEM; + + cdns_ctrl = &dt->cdns_ctrl; + cdns_ctrl->caps1 = devdata; + + cdns_ctrl->dev = &ofdev->dev; + cdns_ctrl->irq = platform_get_irq(ofdev, 0); + if (cdns_ctrl->irq < 0) + return cdns_ctrl->irq; + + dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq); + + cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0); + if (IS_ERR(cdns_ctrl->reg)) { + dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n"); + return PTR_ERR(cdns_ctrl->reg); + } + + res = platform_get_resource(ofdev, IORESOURCE_MEM, 1); + cdns_ctrl->io.dma = res->start; + cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(cdns_ctrl->io.virt)) { + dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n"); + return PTR_ERR(cdns_ctrl->io.virt); + } + + dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); + if (IS_ERR(dt->clk)) + return PTR_ERR(dt->clk); + + cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk); + + ret = of_property_read_u32(ofdev->dev.of_node, + "cdns,board-delay-ps", &val); + if (ret) { + val = 4830; + dev_info(cdns_ctrl->dev, + "missing cdns,board-delay-ps property, %d was set\n", + val); + } + cdns_ctrl->board_delay = val; + + ret = cadence_nand_init(cdns_ctrl); + if (ret) + return ret; + + platform_set_drvdata(ofdev, dt); + return 0; +} + +static int cadence_nand_dt_remove(struct platform_device *ofdev) +{ + struct cadence_nand_dt *dt = platform_get_drvdata(ofdev); + + cadence_nand_remove(&dt->cdns_ctrl); + + return 0; +} + +static struct platform_driver cadence_nand_dt_driver = { + .probe = cadence_nand_dt_probe, + .remove = cadence_nand_dt_remove, + .driver = { + .name = "cadence-nand-controller", + .of_match_table = cadence_nand_dt_ids, + }, +}; + +module_platform_driver(cadence_nand_dt_driver); + +MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Driver for Cadence NAND flash controller"); + diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 3102ddbd8abd..fafd0a0aa8e2 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -21,7 +21,6 @@ #include "denali.h" #define DENALI_NAND_NAME "denali-nand" -#define DENALI_DEFAULT_OOB_SKIP_BYTES 8 /* for Indexed 
Addressing */ #define DENALI_INDEXED_CTRL 0x00 @@ -1302,15 +1301,16 @@ int denali_init(struct denali_controller *denali) /* * Set how many bytes should be skipped before writing data in OOB. - * If a non-zero value has already been set (by firmware or something), - * just use it. Otherwise, set the driver's default. + * If a platform requests a non-zero value, set it to the register. + * Otherwise, read the value out, expecting it has already been set up + * by firmware. */ - denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); - if (!denali->oob_skip_bytes) { - denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES; + if (denali->oob_skip_bytes) iowrite32(denali->oob_skip_bytes, denali->reg + SPARE_AREA_SKIP_BYTES); - } + else + denali->oob_skip_bytes = ioread32(denali->reg + + SPARE_AREA_SKIP_BYTES); iowrite32(0, denali->reg + TRANSFER_SPARE_REG); iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED); diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index 5e14836f6bd5..f08740ae282b 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -6,6 +6,7 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/ioport.h> @@ -14,6 +15,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/reset.h> #include "denali.h" @@ -22,11 +24,14 @@ struct denali_dt { struct clk *clk; /* core clock */ struct clk *clk_x; /* bus interface clock */ struct clk *clk_ecc; /* ECC circuit clock */ + struct reset_control *rst; /* core reset */ + struct reset_control *rst_reg; /* register reset */ }; struct denali_dt_data { unsigned int revision; unsigned int caps; + unsigned int oob_skip_bytes; const struct nand_ecc_caps *ecc_caps; }; @@ -34,6 +39,7 @@ NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15); static const struct denali_dt_data denali_socfpga_data = { .caps = DENALI_CAP_HW_ECC_FIXUP, + .oob_skip_bytes = 2, .ecc_caps = &denali_socfpga_ecc_caps, }; @@ -42,6 +48,7 @@ NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes, static const struct denali_dt_data denali_uniphier_v5a_data = { .caps = DENALI_CAP_HW_ECC_FIXUP | DENALI_CAP_DMA_64BIT, + .oob_skip_bytes = 8, .ecc_caps = &denali_uniphier_v5a_ecc_caps, }; @@ -51,6 +58,7 @@ static const struct denali_dt_data denali_uniphier_v5b_data = { .revision = 0x0501, .caps = DENALI_CAP_HW_ECC_FIXUP | DENALI_CAP_DMA_64BIT, + .oob_skip_bytes = 8, .ecc_caps = &denali_uniphier_v5b_ecc_caps, }; @@ -102,47 +110,6 @@ static int denali_dt_chip_init(struct denali_controller *denali, return denali_chip_init(denali, dchip); } -/* Backward compatibility for old platforms */ -static int denali_dt_legacy_chip_init(struct denali_controller *denali) -{ - struct denali_chip *dchip; - int nsels, i; - - nsels = denali->nbanks; - - dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels), - GFP_KERNEL); - if (!dchip) - return -ENOMEM; - - dchip->nsels = nsels; - - for (i = 0; i < nsels; i++) - dchip->sels[i].bank = i; - - nand_set_flash_node(&dchip->chip, denali->dev->of_node); - - return denali_chip_init(denali, dchip); -} - -/* - * Check the DT binding. - * The new binding expects chip subnodes in the controller node. - * So, #address-cells = <1>; #size-cells = <0>; are required. - * Check the #size-cells to distinguish the binding. 
- */ -static bool denali_dt_is_legacy_binding(struct device_node *np) -{ - u32 cells; - int ret; - - ret = of_property_read_u32(np, "#size-cells", &cells); - if (ret) - return true; - - return cells != 0; -} - static int denali_dt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -159,18 +126,18 @@ static int denali_dt_probe(struct platform_device *pdev) denali = &dt->controller; data = of_device_get_match_data(dev); - if (data) { - denali->revision = data->revision; - denali->caps = data->caps; - denali->ecc_caps = data->ecc_caps; - } + if (WARN_ON(!data)) + return -EINVAL; + + denali->revision = data->revision; + denali->caps = data->caps; + denali->oob_skip_bytes = data->oob_skip_bytes; + denali->ecc_caps = data->ecc_caps; denali->dev = dev; denali->irq = platform_get_irq(pdev, 0); - if (denali->irq < 0) { - dev_err(dev, "no irq defined\n"); + if (denali->irq < 0) return denali->irq; - } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg"); denali->reg = devm_ioremap_resource(dev, res); @@ -194,6 +161,14 @@ static int denali_dt_probe(struct platform_device *pdev) if (IS_ERR(dt->clk_ecc)) return PTR_ERR(dt->clk_ecc); + dt->rst = devm_reset_control_get_optional_shared(dev, "nand"); + if (IS_ERR(dt->rst)) + return PTR_ERR(dt->rst); + + dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg"); + if (IS_ERR(dt->rst_reg)) + return PTR_ERR(dt->rst_reg); + ret = clk_prepare_enable(dt->clk); if (ret) return ret; @@ -209,21 +184,35 @@ static int denali_dt_probe(struct platform_device *pdev) denali->clk_rate = clk_get_rate(dt->clk); denali->clk_x_rate = clk_get_rate(dt->clk_x); - ret = denali_init(denali); + /* + * Deassert the register reset, and the core reset in this order. + * Deasserting the core reset while the register reset is asserted + * will cause unpredictable behavior in the controller. + */ + ret = reset_control_deassert(dt->rst_reg); if (ret) goto out_disable_clk_ecc; - if (denali_dt_is_legacy_binding(dev->of_node)) { - ret = denali_dt_legacy_chip_init(denali); - if (ret) + ret = reset_control_deassert(dt->rst); + if (ret) + goto out_assert_rst_reg; + + /* + * When the reset is deasserted, the initialization sequence is kicked + * (bootstrap process). The driver must wait until it finished. + * Otherwise, it will result in unpredictable behavior. 
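+	 * That is why a generous usleep_range() follows below rather than
+	 * a register poll.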
+ */ + usleep_range(200, 1000); + + ret = denali_init(denali); + if (ret) + goto out_assert_rst; + + for_each_child_of_node(dev->of_node, np) { + ret = denali_dt_chip_init(denali, np); + if (ret) { + of_node_put(np); goto out_remove_denali; - } else { - for_each_child_of_node(dev->of_node, np) { - ret = denali_dt_chip_init(denali, np); - if (ret) { - of_node_put(np); - goto out_remove_denali; - } } } @@ -233,6 +222,10 @@ static int denali_dt_probe(struct platform_device *pdev) out_remove_denali: denali_remove(denali); +out_assert_rst: + reset_control_assert(dt->rst); +out_assert_rst_reg: + reset_control_assert(dt->rst_reg); out_disable_clk_ecc: clk_disable_unprepare(dt->clk_ecc); out_disable_clk_x: @@ -248,6 +241,8 @@ static int denali_dt_remove(struct platform_device *pdev) struct denali_dt *dt = platform_get_drvdata(pdev); denali_remove(&dt->controller); + reset_control_assert(dt->rst); + reset_control_assert(dt->rst_reg); clk_disable_unprepare(dt->clk_ecc); clk_disable_unprepare(dt->clk_x); clk_disable_unprepare(dt->clk); diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index d62aa5271753..2f77ee55e1bf 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; } - denali->reg = ioremap_nocache(csr_base, csr_len); + denali->reg = ioremap(csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); return -ENOMEM; } - denali->host = ioremap_nocache(mem_base, mem_len); + denali->host = ioremap(mem_base, mem_len); if (!denali->host) { - dev_err(&dev->dev, "Spectra: ioremap_nocache failed!"); + dev_err(&dev->dev, "Spectra: ioremap failed!"); ret = -ENOMEM; goto out_unmap_reg; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 1054cc070747..f31fae3a4c68 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev) fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN | FSL_UPM_WAIT_WRITE_BYTE; - fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, + fun->io_base = devm_ioremap(&ofdev->dev, io_res.start, resource_size(&io_res)); if (!fun->io_base) { ret = -ENOMEM; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 334fe3130285..b9d5d55a5edb 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this) struct resources *r = &this->resources; int ret; + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) + return ret; + ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; @@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this) */ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - return 0; err_out: + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); return ret; } @@ -2722,6 +2727,10 @@ static int gpmi_pm_resume(struct device *dev) return ret; } + /* Set flag to get timing setup restored for next exec_op */ + if (this->hw.clk_rate) + this->hw.must_apply_timings = true; + /* re-init the BCH registers */ ret = bch_set_geometry(this); if (ret) { diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c index 6a4626a8bf95..0b48be54ba6f 100644 --- a/drivers/mtd/nand/raw/hisi504_nand.c +++ 
b/drivers/mtd/nand/raw/hisi504_nand.c @@ -751,10 +751,8 @@ static int hisi_nfc_probe(struct platform_device *pdev) mtd = nand_to_mtd(chip); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no IRQ resource defined\n"); + if (irq < 0) return -ENXIO; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->iobase = devm_ioremap_resource(dev, res); diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig index 66b7cffdb0c2..e30feb56b650 100644 --- a/drivers/mtd/nand/raw/ingenic/Kconfig +++ b/drivers/mtd/nand/raw/ingenic/Kconfig @@ -1,11 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -config MTD_NAND_JZ4740 - tristate "JZ4740 NAND controller" - depends on MACH_JZ4740 || COMPILE_TEST - depends on HAS_IOMEM - help - Enables support for NAND Flash on JZ4740 SoC based boards. - config MTD_NAND_JZ4780 tristate "JZ4780 NAND controller" depends on JZ4780_NEMC diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile index b63d36889263..4c53f5e759c3 100644 --- a/drivers/mtd/nand/raw/ingenic/Makefile +++ b/drivers/mtd/nand/raw/ingenic/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o ingenic_nand-y += ingenic_nand_drv.o diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index d7b7c0f13909..49afebee50db 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -310,7 +310,6 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, struct device *dev = &pdev->dev; struct ingenic_nand *nand; struct ingenic_nand_cs *cs; - struct resource *res; struct nand_chip *chip; struct mtd_info *mtd; const __be32 *reg; @@ -326,8 +325,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND); - res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr); - cs->base = devm_ioremap_resource(dev, res); + cs->base = devm_platform_ioremap_resource(pdev, chipnr); if (IS_ERR(cs->base)) return PTR_ERR(cs->base); @@ -418,6 +416,7 @@ static int ingenic_nand_init_chips(struct ingenic_nfc *nfc, ret = ingenic_nand_init_chip(pdev, nfc, np, i); if (ret) { ingenic_nand_cleanup_chips(nfc); + of_node_put(np); return ret; } diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_nand.c b/drivers/mtd/nand/raw/ingenic/jz4740_nand.c deleted file mode 100644 index acdf674fcc87..000000000000 --- a/drivers/mtd/nand/raw/ingenic/jz4740_nand.c +++ /dev/null @@ -1,536 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> - * JZ4740 SoC NAND controller driver - */ - -#include <linux/io.h> -#include <linux/ioport.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/partitions.h> - -#include <linux/gpio/consumer.h> - -#include <linux/platform_data/jz4740/jz4740_nand.h> - -#define JZ_REG_NAND_CTRL 0x50 -#define JZ_REG_NAND_ECC_CTRL 0x100 -#define JZ_REG_NAND_DATA 0x104 -#define JZ_REG_NAND_PAR0 0x108 -#define JZ_REG_NAND_PAR1 0x10C -#define JZ_REG_NAND_PAR2 0x110 -#define JZ_REG_NAND_IRQ_STAT 0x114 -#define JZ_REG_NAND_IRQ_CTRL 0x118 -#define JZ_REG_NAND_ERR(x) (0x11C + ((x) << 2)) - -#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4) -#define 
JZ_NAND_ECC_CTRL_ENCODING BIT(3) -#define JZ_NAND_ECC_CTRL_RS BIT(2) -#define JZ_NAND_ECC_CTRL_RESET BIT(1) -#define JZ_NAND_ECC_CTRL_ENABLE BIT(0) - -#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29)) -#define JZ_NAND_STATUS_PAD_FINISH BIT(4) -#define JZ_NAND_STATUS_DEC_FINISH BIT(3) -#define JZ_NAND_STATUS_ENC_FINISH BIT(2) -#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1) -#define JZ_NAND_STATUS_ERROR BIT(0) - -#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1) -#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1) -#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa - -#define JZ_NAND_MEM_CMD_OFFSET 0x08000 -#define JZ_NAND_MEM_ADDR_OFFSET 0x10000 - -struct jz_nand { - struct nand_chip chip; - void __iomem *base; - struct resource *mem; - - unsigned char banks[JZ_NAND_NUM_BANKS]; - void __iomem *bank_base[JZ_NAND_NUM_BANKS]; - struct resource *bank_mem[JZ_NAND_NUM_BANKS]; - - int selected_bank; - - struct gpio_desc *busy_gpio; - bool is_reading; -}; - -static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd) -{ - return container_of(mtd_to_nand(mtd), struct jz_nand, chip); -} - -static void jz_nand_select_chip(struct nand_chip *chip, int chipnr) -{ - struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip)); - uint32_t ctrl; - int banknr; - - ctrl = readl(nand->base + JZ_REG_NAND_CTRL); - ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK; - - if (chipnr == -1) { - banknr = -1; - } else { - banknr = nand->banks[chipnr] - 1; - chip->legacy.IO_ADDR_R = nand->bank_base[banknr]; - chip->legacy.IO_ADDR_W = nand->bank_base[banknr]; - } - writel(ctrl, nand->base + JZ_REG_NAND_CTRL); - - nand->selected_bank = banknr; -} - -static void jz_nand_cmd_ctrl(struct nand_chip *chip, int dat, - unsigned int ctrl) -{ - struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip)); - uint32_t reg; - void __iomem *bank_base = nand->bank_base[nand->selected_bank]; - - BUG_ON(nand->selected_bank < 0); - - if (ctrl & NAND_CTRL_CHANGE) { - BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE)); - if (ctrl & NAND_ALE) - bank_base += JZ_NAND_MEM_ADDR_OFFSET; - else if (ctrl & NAND_CLE) - bank_base += JZ_NAND_MEM_CMD_OFFSET; - chip->legacy.IO_ADDR_W = bank_base; - - reg = readl(nand->base + JZ_REG_NAND_CTRL); - if (ctrl & NAND_NCE) - reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank); - else - reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank); - writel(reg, nand->base + JZ_REG_NAND_CTRL); - } - if (dat != NAND_CMD_NONE) - writeb(dat, chip->legacy.IO_ADDR_W); -} - -static int jz_nand_dev_ready(struct nand_chip *chip) -{ - struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip)); - return gpiod_get_value_cansleep(nand->busy_gpio); -} - -static void jz_nand_hwctl(struct nand_chip *chip, int mode) -{ - struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip)); - uint32_t reg; - - writel(0, nand->base + JZ_REG_NAND_IRQ_STAT); - reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); - - reg |= JZ_NAND_ECC_CTRL_RESET; - reg |= JZ_NAND_ECC_CTRL_ENABLE; - reg |= JZ_NAND_ECC_CTRL_RS; - - switch (mode) { - case NAND_ECC_READ: - reg &= ~JZ_NAND_ECC_CTRL_ENCODING; - nand->is_reading = true; - break; - case NAND_ECC_WRITE: - reg |= JZ_NAND_ECC_CTRL_ENCODING; - nand->is_reading = false; - break; - default: - break; - } - - writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); -} - -static int jz_nand_calculate_ecc_rs(struct nand_chip *chip, const uint8_t *dat, - uint8_t *ecc_code) -{ - struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip)); - uint32_t reg, status; - int i; - unsigned int timeout = 1000; - static uint8_t empty_block_ecc[] = {0xcd, 
0x9d, 0x90, 0x58, 0xf4, - 0x8b, 0xff, 0xb7, 0x6f}; - - if (nand->is_reading) - return 0; - - do { - status = readl(nand->base + JZ_REG_NAND_IRQ_STAT); - } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout); - - if (timeout == 0) - return -1; - - reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); - reg &= ~JZ_NAND_ECC_CTRL_ENABLE; - writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); - - for (i = 0; i < 9; ++i) - ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i); - - /* If the written data is completely 0xff, we also want to write 0xff as - * ecc, otherwise we will get in trouble when doing subpage writes. */ - if (memcmp(ecc_code, empty_block_ecc, 9) == 0) - memset(ecc_code, 0xff, 9); - - return 0; -} - -static void jz_nand_correct_data(uint8_t *dat, int index, int mask) -{ - int offset = index & 0x7; - uint16_t data; - - index += (index >> 3); - - data = dat[index]; - data |= dat[index+1] << 8; - - mask ^= (data >> offset) & 0x1ff; - data &= ~(0x1ff << offset); - data |= (mask << offset); - - dat[index] = data & 0xff; - dat[index+1] = (data >> 8) & 0xff; -} - -static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat, - uint8_t *read_ecc, uint8_t *calc_ecc) -{ - struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip)); - int i, error_count, index; - uint32_t reg, status, error; - unsigned int timeout = 1000; - - for (i = 0; i < 9; ++i) - writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i); - - reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); - reg |= JZ_NAND_ECC_CTRL_PAR_READY; - writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); - - do { - status = readl(nand->base + JZ_REG_NAND_IRQ_STAT); - } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout); - - if (timeout == 0) - return -ETIMEDOUT; - - reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); - reg &= ~JZ_NAND_ECC_CTRL_ENABLE; - writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); - - if (status & JZ_NAND_STATUS_ERROR) { - if (status & JZ_NAND_STATUS_UNCOR_ERROR) - return -EBADMSG; - - error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29; - - for (i = 0; i < error_count; ++i) { - error = readl(nand->base + JZ_REG_NAND_ERR(i)); - index = ((error >> 16) & 0x1ff) - 1; - if (index >= 0 && index < 512) - jz_nand_correct_data(dat, index, error & 0x1ff); - } - - return error_count; - } - - return 0; -} - -static int jz_nand_ioremap_resource(struct platform_device *pdev, - const char *name, struct resource **res, void __iomem **base) -{ - int ret; - - *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - if (!*res) { - dev_err(&pdev->dev, "Failed to get platform %s memory\n", name); - ret = -ENXIO; - goto err; - } - - *res = request_mem_region((*res)->start, resource_size(*res), - pdev->name); - if (!*res) { - dev_err(&pdev->dev, "Failed to request %s memory region\n", name); - ret = -EBUSY; - goto err; - } - - *base = ioremap((*res)->start, resource_size(*res)); - if (!*base) { - dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name); - ret = -EBUSY; - goto err_release_mem; - } - - return 0; - -err_release_mem: - release_mem_region((*res)->start, resource_size(*res)); -err: - *res = NULL; - *base = NULL; - return ret; -} - -static inline void jz_nand_iounmap_resource(struct resource *res, - void __iomem *base) -{ - iounmap(base); - release_mem_region(res->start, resource_size(res)); -} - -static int jz_nand_detect_bank(struct platform_device *pdev, - struct jz_nand *nand, unsigned char bank, - size_t chipnr, uint8_t *nand_maf_id, - uint8_t *nand_dev_id) -{ - int ret; - char res_name[6]; - uint32_t ctrl;
- struct nand_chip *chip = &nand->chip; - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_memory_organization *memorg; - u8 id[2]; - - memorg = nanddev_get_memorg(&chip->base); - - /* Request I/O resource. */ - sprintf(res_name, "bank%d", bank); - ret = jz_nand_ioremap_resource(pdev, res_name, - &nand->bank_mem[bank - 1], - &nand->bank_base[bank - 1]); - if (ret) - return ret; - - /* Enable chip in bank. */ - ctrl = readl(nand->base + JZ_REG_NAND_CTRL); - ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1); - writel(ctrl, nand->base + JZ_REG_NAND_CTRL); - - if (chipnr == 0) { - /* Detect first chip. */ - ret = nand_scan(chip, 1); - if (ret) - goto notfound_id; - - /* Retrieve the IDs from the first chip. */ - nand_select_target(chip, 0); - nand_reset_op(chip); - nand_readid_op(chip, 0, id, sizeof(id)); - *nand_maf_id = id[0]; - *nand_dev_id = id[1]; - } else { - /* Detect additional chip. */ - nand_select_target(chip, chipnr); - nand_reset_op(chip); - nand_readid_op(chip, 0, id, sizeof(id)); - if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) { - ret = -ENODEV; - goto notfound_id; - } - - /* Update size of the MTD. */ - memorg->ntargets++; - mtd->size += nanddev_target_size(&chip->base); - } - - dev_info(&pdev->dev, "Found chip %zu on bank %i\n", chipnr, bank); - return 0; - -notfound_id: - dev_info(&pdev->dev, "No chip found on bank %i\n", bank); - ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1)); - writel(ctrl, nand->base + JZ_REG_NAND_CTRL); - jz_nand_iounmap_resource(nand->bank_mem[bank - 1], - nand->bank_base[bank - 1]); - return ret; -} - -static int jz_nand_attach_chip(struct nand_chip *chip) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct device *dev = mtd->dev.parent; - struct jz_nand_platform_data *pdata = dev_get_platdata(dev); - struct platform_device *pdev = to_platform_device(dev); - - if (pdata && pdata->ident_callback) - pdata->ident_callback(pdev, mtd, &pdata->partitions, - &pdata->num_partitions); - - return 0; -} - -static const struct nand_controller_ops jz_nand_controller_ops = { - .attach_chip = jz_nand_attach_chip, -}; - -static int jz_nand_probe(struct platform_device *pdev) -{ - int ret; - struct jz_nand *nand; - struct nand_chip *chip; - struct mtd_info *mtd; - struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); - size_t chipnr, bank_idx; - uint8_t nand_maf_id = 0, nand_dev_id = 0; - - nand = kzalloc(sizeof(*nand), GFP_KERNEL); - if (!nand) - return -ENOMEM; - - ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base); - if (ret) - goto err_free; - - nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN); - if (IS_ERR(nand->busy_gpio)) { - ret = PTR_ERR(nand->busy_gpio); - dev_err(&pdev->dev, "Failed to request busy gpio %d\n", - ret); - goto err_iounmap_mmio; - } - - chip = &nand->chip; - mtd = nand_to_mtd(chip); - mtd->dev.parent = &pdev->dev; - mtd->name = "jz4740-nand"; - - chip->ecc.hwctl = jz_nand_hwctl; - chip->ecc.calculate = jz_nand_calculate_ecc_rs; - chip->ecc.correct = jz_nand_correct_ecc_rs; - chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; - chip->ecc.size = 512; - chip->ecc.bytes = 9; - chip->ecc.strength = 4; - chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; - - chip->legacy.chip_delay = 50; - chip->legacy.cmd_ctrl = jz_nand_cmd_ctrl; - chip->legacy.select_chip = jz_nand_select_chip; - chip->legacy.dummy_controller.ops = &jz_nand_controller_ops; - - if (nand->busy_gpio) - chip->legacy.dev_ready = jz_nand_dev_ready; - - platform_set_drvdata(pdev, nand); - - /* We are going to autodetect NAND chips in 
the banks specified in the - * platform data. Although nand_scan_ident() can detect multiple chips, - * it requires those chips to be numbered consecutively, which is not - * always the case for external memory banks. And a fixed chip-to-bank - * mapping is not practical either, since for example Dingoo units - * produced at different times have NAND chips in different banks. - */ - chipnr = 0; - for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) { - unsigned char bank; - - /* If there is no platform data, look for NAND in bank 1, - * which is the most likely bank since it is the only one - * that can be booted from. - */ - bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1; - if (bank == 0) - break; - if (bank > JZ_NAND_NUM_BANKS) { - dev_warn(&pdev->dev, - "Skipping non-existing bank: %d\n", bank); - continue; - } - /* The detection routine will directly or indirectly call - * jz_nand_select_chip(), so nand->banks has to contain the - * bank we're checking. - */ - nand->banks[chipnr] = bank; - if (jz_nand_detect_bank(pdev, nand, bank, chipnr, - &nand_maf_id, &nand_dev_id) == 0) - chipnr++; - else - nand->banks[chipnr] = 0; - } - if (chipnr == 0) { - dev_err(&pdev->dev, "No NAND chips found\n"); - goto err_iounmap_mmio; - } - - ret = mtd_device_register(mtd, pdata ? pdata->partitions : NULL, - pdata ? pdata->num_partitions : 0); - - if (ret) { - dev_err(&pdev->dev, "Failed to add mtd device\n"); - goto err_cleanup_nand; - } - - dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n"); - - return 0; - -err_cleanup_nand: - nand_cleanup(chip); - while (chipnr--) { - unsigned char bank = nand->banks[chipnr]; - jz_nand_iounmap_resource(nand->bank_mem[bank - 1], - nand->bank_base[bank - 1]); - } - writel(0, nand->base + JZ_REG_NAND_CTRL); -err_iounmap_mmio: - jz_nand_iounmap_resource(nand->mem, nand->base); -err_free: - kfree(nand); - return ret; -} - -static int jz_nand_remove(struct platform_device *pdev) -{ - struct jz_nand *nand = platform_get_drvdata(pdev); - size_t i; - - nand_release(&nand->chip); - - /* Deassert and disable all chips */ - writel(0, nand->base + JZ_REG_NAND_CTRL); - - for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) { - unsigned char bank = nand->banks[i]; - if (bank != 0) { - jz_nand_iounmap_resource(nand->bank_mem[bank - 1], - nand->bank_base[bank - 1]); - } - } - - jz_nand_iounmap_resource(nand->mem, nand->base); - - kfree(nand); - - return 0; -} - -static struct platform_driver jz_nand_driver = { - .probe = jz_nand_probe, - .remove = jz_nand_remove, - .driver = { - .name = "jz4740-nand", - }, -}; - -module_platform_driver(jz_nand_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); -MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC"); -MODULE_ALIAS("platform:jz4740-nand"); diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index 78b31f845c50..241b58b83240 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -773,7 +773,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { - dev_err(&pdev->dev, "failed to get platform irq\n"); res = -EINVAL; goto release_dma_chan; } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index fc49e13d81ec..fb5abdcfb007 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2862,10 +2862,8 @@ static int marvell_nfc_probe(struct platform_device *pdev) return
PTR_ERR(nfc->regs); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to retrieve irq\n"); + if (irq < 0) return irq; - } nfc->core_clk = devm_clk_get(&pdev->dev, "core"); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index ea57ddcec41e..9f17b5b8efbf 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1320,6 +1320,7 @@ static int meson_nfc_nand_chips_init(struct device *dev, ret = meson_nfc_nand_chip_init(dev, nfc, nand_np); if (ret) { meson_nfc_nand_chip_cleanup(nfc); + of_node_put(nand_np); return ret; } } @@ -1398,10 +1399,8 @@ static int meson_nfc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "no NFC IRQ resource\n"); + if (irq < 0) return -EINVAL; - } ret = meson_nfc_clk_init(nfc); if (ret) { diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index 8b90def6686f..a2fcb739e5f8 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -438,7 +438,7 @@ static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, buffer += blksize; offset += blksize; size -= blksize; - }; + } } /* Copy data from/to NFC main and spare buffers */ diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 74595b644b7c..75f1fa3d4d35 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -527,10 +527,8 @@ static int mtk_ecc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to get irq: %d\n", irq); + if (irq < 0) return irq; - } ret = dma_set_mask(dev, DMA_BIT_MASK(32)); if (ret) { diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 373d47d1ba4c..b8305e39ab51 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1540,7 +1540,6 @@ static int mtk_nfc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "no nfi irq resource\n"); ret = -EINVAL; goto clk_disable; } diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c new file mode 100644 index 000000000000..ed7a4e021bf5 --- /dev/null +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -0,0 +1,580 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Macronix International Co., Ltd. 
+ * + * Author: + * Mason Yang <masonccyang@mxic.com.tw> + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/platform_device.h> + +#include "internals.h" + +#define HC_CFG 0x0 +#define HC_CFG_IF_CFG(x) ((x) << 27) +#define HC_CFG_DUAL_SLAVE BIT(31) +#define HC_CFG_INDIVIDUAL BIT(30) +#define HC_CFG_NIO(x) (((x) / 4) << 27) +#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2))) +#define HC_CFG_TYPE_SPI_NOR 0 +#define HC_CFG_TYPE_SPI_NAND 1 +#define HC_CFG_TYPE_SPI_RAM 2 +#define HC_CFG_TYPE_RAW_NAND 3 +#define HC_CFG_SLV_ACT(x) ((x) << 21) +#define HC_CFG_CLK_PH_EN BIT(20) +#define HC_CFG_CLK_POL_INV BIT(19) +#define HC_CFG_BIG_ENDIAN BIT(18) +#define HC_CFG_DATA_PASS BIT(17) +#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16) +#define HC_CFG_MAN_START_EN BIT(3) +#define HC_CFG_MAN_START BIT(2) +#define HC_CFG_MAN_CS_EN BIT(1) +#define HC_CFG_MAN_CS_ASSERT BIT(0) + +#define INT_STS 0x4 +#define INT_STS_EN 0x8 +#define INT_SIG_EN 0xc +#define INT_STS_ALL GENMASK(31, 0) +#define INT_RDY_PIN BIT(26) +#define INT_RDY_SR BIT(25) +#define INT_LNR_SUSP BIT(24) +#define INT_ECC_ERR BIT(17) +#define INT_CRC_ERR BIT(16) +#define INT_LWR_DIS BIT(12) +#define INT_LRD_DIS BIT(11) +#define INT_SDMA_INT BIT(10) +#define INT_DMA_FINISH BIT(9) +#define INT_RX_NOT_FULL BIT(3) +#define INT_RX_NOT_EMPTY BIT(2) +#define INT_TX_NOT_FULL BIT(1) +#define INT_TX_EMPTY BIT(0) + +#define HC_EN 0x10 +#define HC_EN_BIT BIT(0) + +#define TXD(x) (0x14 + ((x) * 4)) +#define RXD 0x24 + +#define SS_CTRL(s) (0x30 + ((s) * 4)) +#define LRD_CFG 0x44 +#define LWR_CFG 0x80 +#define RWW_CFG 0x70 +#define OP_READ BIT(23) +#define OP_DUMMY_CYC(x) ((x) << 17) +#define OP_ADDR_BYTES(x) ((x) << 14) +#define OP_CMD_BYTES(x) (((x) - 1) << 13) +#define OP_OCTA_CRC_EN BIT(12) +#define OP_DQS_EN BIT(11) +#define OP_ENHC_EN BIT(10) +#define OP_PREAMBLE_EN BIT(9) +#define OP_DATA_DDR BIT(8) +#define OP_DATA_BUSW(x) ((x) << 6) +#define OP_ADDR_DDR BIT(5) +#define OP_ADDR_BUSW(x) ((x) << 3) +#define OP_CMD_DDR BIT(2) +#define OP_CMD_BUSW(x) (x) +#define OP_BUSW_1 0 +#define OP_BUSW_2 1 +#define OP_BUSW_4 2 +#define OP_BUSW_8 3 + +#define OCTA_CRC 0x38 +#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16)) +#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16))) +#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16)) + +#define ONFI_DIN_CNT(s) (0x3c + (s)) + +#define LRD_CTRL 0x48 +#define RWW_CTRL 0x74 +#define LWR_CTRL 0x84 +#define LMODE_EN BIT(31) +#define LMODE_SLV_ACT(x) ((x) << 21) +#define LMODE_CMD1(x) ((x) << 8) +#define LMODE_CMD0(x) (x) + +#define LRD_ADDR 0x4c +#define LWR_ADDR 0x88 +#define LRD_RANGE 0x50 +#define LWR_RANGE 0x8c + +#define AXI_SLV_ADDR 0x54 + +#define DMAC_RD_CFG 0x58 +#define DMAC_WR_CFG 0x94 +#define DMAC_CFG_PERIPH_EN BIT(31) +#define DMAC_CFG_ALLFLUSH_EN BIT(30) +#define DMAC_CFG_LASTFLUSH_EN BIT(29) +#define DMAC_CFG_QE(x) (((x) + 1) << 16) +#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12) +#define DMAC_CFG_BURST_SZ(x) ((x) << 8) +#define DMAC_CFG_DIR_READ BIT(1) +#define DMAC_CFG_START BIT(0) + +#define DMAC_RD_CNT 0x5c +#define DMAC_WR_CNT 0x98 + +#define SDMA_ADDR 0x60 + +#define DMAM_CFG 0x64 +#define DMAM_CFG_START BIT(31) +#define DMAM_CFG_CONT BIT(30) +#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2) +#define DMAM_CFG_DIR_READ BIT(1) +#define DMAM_CFG_EN BIT(0) + +#define DMAM_CNT 0x68 + +#define LNR_TIMER_TH 0x6c + 
+#define RDM_CFG0 0x78 +#define RDM_CFG0_POLY(x) (x) + +#define RDM_CFG1 0x7c +#define RDM_CFG1_RDM_EN BIT(31) +#define RDM_CFG1_SEED(x) (x) + +#define LWR_SUSP_CTRL 0x90 +#define LWR_SUSP_CTRL_EN BIT(31) + +#define DMAS_CTRL 0x9c +#define DMAS_CTRL_EN BIT(31) +#define DMAS_CTRL_DIR_READ BIT(30) + +#define DATA_STROB 0xa0 +#define DATA_STROB_EDO_EN BIT(2) +#define DATA_STROB_INV_POL BIT(1) +#define DATA_STROB_DELAY_2CYC BIT(0) + +#define IDLY_CODE(x) (0xa4 + ((x) * 4)) +#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8)) + +#define GPIO 0xc4 +#define GPIO_PT(x) BIT(3 + ((x) * 16)) +#define GPIO_RESET(x) BIT(2 + ((x) * 16)) +#define GPIO_HOLDB(x) BIT(1 + ((x) * 16)) +#define GPIO_WPB(x) BIT((x) * 16) + +#define HC_VER 0xd0 + +#define HW_TEST(x) (0xe0 + ((x) * 4)) + +#define MXIC_NFC_MAX_CLK_HZ 50000000 +#define IRQ_TIMEOUT 1000 + +struct mxic_nand_ctlr { + struct clk *ps_clk; + struct clk *send_clk; + struct clk *send_dly_clk; + struct completion complete; + void __iomem *regs; + struct nand_controller controller; + struct device *dev; + struct nand_chip chip; +}; + +static int mxic_nfc_clk_enable(struct mxic_nand_ctlr *nfc) +{ + int ret; + + ret = clk_prepare_enable(nfc->ps_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(nfc->send_clk); + if (ret) + goto err_ps_clk; + + ret = clk_prepare_enable(nfc->send_dly_clk); + if (ret) + goto err_send_dly_clk; + + return ret; + +err_send_dly_clk: + clk_disable_unprepare(nfc->send_clk); +err_ps_clk: + clk_disable_unprepare(nfc->ps_clk); + + return ret; +} + +static void mxic_nfc_clk_disable(struct mxic_nand_ctlr *nfc) +{ + clk_disable_unprepare(nfc->send_clk); + clk_disable_unprepare(nfc->send_dly_clk); + clk_disable_unprepare(nfc->ps_clk); +} + +static void mxic_nfc_set_input_delay(struct mxic_nand_ctlr *nfc, u8 idly_code) +{ + writel(IDLY_CODE_VAL(0, idly_code) | + IDLY_CODE_VAL(1, idly_code) | + IDLY_CODE_VAL(2, idly_code) | + IDLY_CODE_VAL(3, idly_code), + nfc->regs + IDLY_CODE(0)); + writel(IDLY_CODE_VAL(4, idly_code) | + IDLY_CODE_VAL(5, idly_code) | + IDLY_CODE_VAL(6, idly_code) | + IDLY_CODE_VAL(7, idly_code), + nfc->regs + IDLY_CODE(1)); +} + +static int mxic_nfc_clk_setup(struct mxic_nand_ctlr *nfc, unsigned long freq) +{ + int ret; + + ret = clk_set_rate(nfc->send_clk, freq); + if (ret) + return ret; + + ret = clk_set_rate(nfc->send_dly_clk, freq); + if (ret) + return ret; + + /* + * A constant delay range from 0x0 ~ 0x1F for input delay, + * the unit is 78 ps, the max input delay is 2.418 ns. + */ + mxic_nfc_set_input_delay(nfc, 0xf); + + /* + * Phase degree = 360 * freq * output-delay + * where output-delay is a constant value 1 ns in FPGA. 
+ * + * Get Phase degree = 360 * freq * 1 ns + * = 360 * freq * 1 sec / 1000000000 + * = 9 * freq / 25000000 + */ + ret = clk_set_phase(nfc->send_dly_clk, 9 * freq / 25000000); + if (ret) + return ret; + + return 0; +} + +static int mxic_nfc_set_freq(struct mxic_nand_ctlr *nfc, unsigned long freq) +{ + int ret; + + if (freq > MXIC_NFC_MAX_CLK_HZ) + freq = MXIC_NFC_MAX_CLK_HZ; + + mxic_nfc_clk_disable(nfc); + ret = mxic_nfc_clk_setup(nfc, freq); + if (ret) + return ret; + + ret = mxic_nfc_clk_enable(nfc); + if (ret) + return ret; + + return 0; +} + +static irqreturn_t mxic_nfc_isr(int irq, void *dev_id) +{ + struct mxic_nand_ctlr *nfc = dev_id; + u32 sts; + + sts = readl(nfc->regs + INT_STS); + if (sts & INT_RDY_PIN) + complete(&nfc->complete); + else + return IRQ_NONE; + + return IRQ_HANDLED; +} + +static void mxic_nfc_hw_init(struct mxic_nand_ctlr *nfc) +{ + writel(HC_CFG_NIO(8) | HC_CFG_TYPE(1, HC_CFG_TYPE_RAW_NAND) | + HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | + HC_CFG_IDLE_SIO_LVL(1), nfc->regs + HC_CFG); + writel(INT_STS_ALL, nfc->regs + INT_STS_EN); + writel(INT_RDY_PIN, nfc->regs + INT_SIG_EN); + writel(0x0, nfc->regs + ONFI_DIN_CNT(0)); + writel(0, nfc->regs + LRD_CFG); + writel(0, nfc->regs + LRD_CTRL); + writel(0x0, nfc->regs + HC_EN); +} + +static void mxic_nfc_cs_enable(struct mxic_nand_ctlr *nfc) +{ + writel(readl(nfc->regs + HC_CFG) | HC_CFG_MAN_CS_EN, + nfc->regs + HC_CFG); + writel(HC_CFG_MAN_CS_ASSERT | readl(nfc->regs + HC_CFG), + nfc->regs + HC_CFG); +} + +static void mxic_nfc_cs_disable(struct mxic_nand_ctlr *nfc) +{ + writel(~HC_CFG_MAN_CS_ASSERT & readl(nfc->regs + HC_CFG), + nfc->regs + HC_CFG); +} + +static int mxic_nfc_wait_ready(struct nand_chip *chip) +{ + struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); + int ret; + + ret = wait_for_completion_timeout(&nfc->complete, + msecs_to_jiffies(IRQ_TIMEOUT)); + if (!ret) { + dev_err(nfc->dev, "nand device timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int mxic_nfc_data_xfer(struct mxic_nand_ctlr *nfc, const void *txbuf, + void *rxbuf, unsigned int len) +{ + unsigned int pos = 0; + + while (pos < len) { + unsigned int nbytes = len - pos; + u32 data = 0xffffffff; + u32 sts; + int ret; + + if (nbytes > 4) + nbytes = 4; + + if (txbuf) + memcpy(&data, txbuf + pos, nbytes); + + ret = readl_poll_timeout(nfc->regs + INT_STS, sts, + sts & INT_TX_EMPTY, 0, USEC_PER_SEC); + if (ret) + return ret; + + writel(data, nfc->regs + TXD(nbytes % 4)); + + ret = readl_poll_timeout(nfc->regs + INT_STS, sts, + sts & INT_TX_EMPTY, 0, USEC_PER_SEC); + if (ret) + return ret; + + ret = readl_poll_timeout(nfc->regs + INT_STS, sts, + sts & INT_RX_NOT_EMPTY, 0, + USEC_PER_SEC); + if (ret) + return ret; + + data = readl(nfc->regs + RXD); + if (rxbuf) { + data >>= (8 * (4 - nbytes)); + memcpy(rxbuf + pos, &data, nbytes); + } + if (readl(nfc->regs + INT_STS) & INT_RX_NOT_EMPTY) + dev_warn(nfc->dev, "RX FIFO not empty\n"); + + pos += nbytes; + } + + return 0; +} + +static int mxic_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, bool check_only) +{ + struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); + const struct nand_op_instr *instr = NULL; + int ret = 0; + unsigned int op_id; + + mxic_nfc_cs_enable(nfc); + init_completion(&nfc->complete); + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writel(0, nfc->regs + HC_EN); + writel(HC_EN_BIT, nfc->regs + HC_EN); + writel(OP_CMD_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) 
| + OP_CMD_BYTES(0), nfc->regs + SS_CTRL(0)); + + ret = mxic_nfc_data_xfer(nfc, + &instr->ctx.cmd.opcode, + NULL, 1); + break; + + case NAND_OP_ADDR_INSTR: + writel(OP_ADDR_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) | + OP_ADDR_BYTES(instr->ctx.addr.naddrs), + nfc->regs + SS_CTRL(0)); + ret = mxic_nfc_data_xfer(nfc, + instr->ctx.addr.addrs, NULL, + instr->ctx.addr.naddrs); + break; + + case NAND_OP_DATA_IN_INSTR: + writel(0x0, nfc->regs + ONFI_DIN_CNT(0)); + writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) | + OP_READ, nfc->regs + SS_CTRL(0)); + ret = mxic_nfc_data_xfer(nfc, NULL, + instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + writel(instr->ctx.data.len, + nfc->regs + ONFI_DIN_CNT(0)); + writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F), + nfc->regs + SS_CTRL(0)); + ret = mxic_nfc_data_xfer(nfc, + instr->ctx.data.buf.out, NULL, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = mxic_nfc_wait_ready(chip); + break; + } + } + mxic_nfc_cs_disable(nfc); + + return ret; +} + +static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) +{ + struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); + const struct nand_sdr_timings *sdr; + unsigned long freq; + int ret; + + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + freq = NSEC_PER_SEC / (sdr->tRC_min / 1000); + + ret = mxic_nfc_set_freq(nfc, freq); + if (ret) + dev_err(nfc->dev, "set freq:%ld failed\n", freq); + + if (sdr->tRC_min < 30000) + writel(DATA_STROB_EDO_EN, nfc->regs + DATA_STROB); + + return 0; +} + +static const struct nand_controller_ops mxic_nand_controller_ops = { + .exec_op = mxic_nfc_exec_op, + .setup_data_interface = mxic_nfc_setup_data_interface, +}; + +static int mxic_nfc_probe(struct platform_device *pdev) +{ + struct device_node *nand_np, *np = pdev->dev.of_node; + struct mtd_info *mtd; + struct mxic_nand_ctlr *nfc; + struct nand_chip *nand_chip; + int err; + int irq; + + nfc = devm_kzalloc(&pdev->dev, sizeof(struct mxic_nand_ctlr), + GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->ps_clk = devm_clk_get(&pdev->dev, "ps"); + if (IS_ERR(nfc->ps_clk)) + return PTR_ERR(nfc->ps_clk); + + nfc->send_clk = devm_clk_get(&pdev->dev, "send"); + if (IS_ERR(nfc->send_clk)) + return PTR_ERR(nfc->send_clk); + + nfc->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly"); + if (IS_ERR(nfc->send_dly_clk)) + return PTR_ERR(nfc->send_dly_clk); + + nfc->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nfc->regs)) + return PTR_ERR(nfc->regs); + + nand_chip = &nfc->chip; + mtd = nand_to_mtd(nand_chip); + mtd->dev.parent = &pdev->dev; + + for_each_child_of_node(np, nand_np) + nand_set_flash_node(nand_chip, nand_np); + + nand_chip->priv = nfc; + nfc->dev = &pdev->dev; + nfc->controller.ops = &mxic_nand_controller_ops; + nand_controller_init(&nfc->controller); + nand_chip->controller = &nfc->controller; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mxic_nfc_hw_init(nfc); + + err = devm_request_irq(&pdev->dev, irq, mxic_nfc_isr, + 0, "mxic-nfc", nfc); + if (err) + goto fail; + + err = nand_scan(nand_chip, 1); + if (err) + goto fail; + + err = mtd_device_register(mtd, NULL, 0); + if (err) + goto fail; + + platform_set_drvdata(pdev, nfc); + return 0; + +fail: + mxic_nfc_clk_disable(nfc); + return err; +} + +static int mxic_nfc_remove(struct platform_device *pdev) +{ + struct mxic_nand_ctlr *nfc = 
platform_get_drvdata(pdev); + + nand_release(&nfc->chip); + mxic_nfc_clk_disable(nfc); + return 0; +} + +static const struct of_device_id mxic_nfc_of_ids[] = { + { .compatible = "mxic,multi-itfc-v009-nand-controller", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mxic_nfc_of_ids); + +static struct platform_driver mxic_nfc_driver = { + .probe = mxic_nfc_probe, + .remove = mxic_nfc_remove, + .driver = { + .name = "mxic-nfc", + .of_match_table = mxic_nfc_of_ids, + }, +}; +module_platform_driver(mxic_nfc_driver); + +MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>"); +MODULE_DESCRIPTION("Macronix raw NAND controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 91f046d4d452..f64e3b6605c6 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -292,12 +292,16 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page) struct mtd_info *mtd = nand_to_mtd(chip); int last_page = ((mtd->erasesize - mtd->writesize) >> chip->page_shift) & chip->pagemask; + unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE + | NAND_BBM_LASTPAGE; + if (page == 0 && !(chip->options & bbm_flags)) + return 0; if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE) return 0; - else if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) + if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE) return 1; - else if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) + if (page <= last_page && chip->options & NAND_BBM_LASTPAGE) return last_page; return -EINVAL; @@ -4112,7 +4116,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct nand_chip *chip = mtd_to_nand(mtd); - int ret = -ENOTSUPP; + int ret; ops->retlen = 0; diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 2ef15ef94525..96045d60471e 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -1232,7 +1232,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd) if (!td) { if ((res = nand_memory_bbt(this, bd))) { pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n"); - goto err; + goto err_free_bbt; } return 0; } @@ -1245,7 +1245,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd) buf = vmalloc(len); if (!buf) { res = -ENOMEM; - goto err; + goto err_free_bbt; } /* Is the bbt at a given page? 
*/ @@ -1258,7 +1258,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd) res = check_create(this, buf, bd); if (res) - goto err; + goto err_free_buf; /* Prevent the bbt regions from erasing / writing */ mark_bbt_region(this, td); @@ -1268,7 +1268,9 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd) vfree(buf); return 0; -err: +err_free_buf: + vfree(buf); +err_free_bbt: kfree(this->bbt); this->bbt = NULL; return res; diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 58511aeb0c9a..3ff7ce00cbdb 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -59,7 +59,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip) */ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip) { - unsigned int i; + int i; static const char * const broken_get_timings[] = { "MX30LF1G18AC", "MX30LF1G28AC", @@ -80,12 +80,9 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip) if (!chip->parameters.supports_set_get_features) return; - for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) { - if (!strcmp(broken_get_timings[i], chip->parameters.model)) - break; - } - - if (i == ARRAY_SIZE(broken_get_timings)) + i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings), + chip->parameters.model); + if (i < 0) return; bitmap_clear(chip->parameters.get_feature_list, diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 8ca9fad6e6ad..56654030ec7f 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -446,8 +446,10 @@ static int micron_nand_init(struct nand_chip *chip) if (ret) goto err_free_manuf_data; + chip->options |= NAND_BBM_FIRSTPAGE; + if (mtd->writesize == 2048) - chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE; + chip->options |= NAND_BBM_SECONDPAGE; ondie = micron_supports_on_die_ecc(chip); diff --git a/drivers/mtd/nand/raw/nuc900_nand.c b/drivers/mtd/nand/raw/nuc900_nand.c deleted file mode 100644 index 13bf7b2894d3..000000000000 --- a/drivers/mtd/nand/raw/nuc900_nand.c +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright © 2009 Nuvoton technology corporation. 
- * - * Wan ZongShun <mcuos.com@gmail.com> - */ - -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/err.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/partitions.h> - -#define REG_FMICSR 0x00 -#define REG_SMCSR 0xa0 -#define REG_SMISR 0xac -#define REG_SMCMD 0xb0 -#define REG_SMADDR 0xb4 -#define REG_SMDATA 0xb8 - -#define RESET_FMI 0x01 -#define NAND_EN 0x08 -#define READYBUSY (0x01 << 18) - -#define SWRST 0x01 -#define PSIZE (0x01 << 3) -#define DMARWEN (0x03 << 1) -#define BUSWID (0x01 << 4) -#define ECC4EN (0x01 << 5) -#define WP (0x01 << 24) -#define NANDCS (0x01 << 25) -#define ENDADDR (0x01 << 31) - -#define read_data_reg(dev) \ - __raw_readl((dev)->reg + REG_SMDATA) - -#define write_data_reg(dev, val) \ - __raw_writel((val), (dev)->reg + REG_SMDATA) - -#define write_cmd_reg(dev, val) \ - __raw_writel((val), (dev)->reg + REG_SMCMD) - -#define write_addr_reg(dev, val) \ - __raw_writel((val), (dev)->reg + REG_SMADDR) - -struct nuc900_nand { - struct nand_chip chip; - void __iomem *reg; - struct clk *clk; - spinlock_t lock; -}; - -static inline struct nuc900_nand *mtd_to_nuc900(struct mtd_info *mtd) -{ - return container_of(mtd_to_nand(mtd), struct nuc900_nand, chip); -} - -static const struct mtd_partition partitions[] = { - { - .name = "NAND FS 0", - .offset = 0, - .size = 8 * 1024 * 1024 - }, - { - .name = "NAND FS 1", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL - } -}; - -static unsigned char nuc900_nand_read_byte(struct nand_chip *chip) -{ - unsigned char ret; - struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip)); - - ret = (unsigned char)read_data_reg(nand); - - return ret; -} - -static void nuc900_nand_read_buf(struct nand_chip *chip, - unsigned char *buf, int len) -{ - int i; - struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip)); - - for (i = 0; i < len; i++) - buf[i] = (unsigned char)read_data_reg(nand); -} - -static void nuc900_nand_write_buf(struct nand_chip *chip, - const unsigned char *buf, int len) -{ - int i; - struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip)); - - for (i = 0; i < len; i++) - write_data_reg(nand, buf[i]); -} - -static int nuc900_check_rb(struct nuc900_nand *nand) -{ - unsigned int val; - spin_lock(&nand->lock); - val = __raw_readl(nand->reg + REG_SMISR); - val &= READYBUSY; - spin_unlock(&nand->lock); - - return val; -} - -static int nuc900_nand_devready(struct nand_chip *chip) -{ - struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip)); - int ready; - - ready = (nuc900_check_rb(nand)) ? 
1 : 0; - return ready; -} - -static void nuc900_nand_command_lp(struct nand_chip *chip, - unsigned int command, - int column, int page_addr) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - struct nuc900_nand *nand = mtd_to_nuc900(mtd); - - if (command == NAND_CMD_READOOB) { - column += mtd->writesize; - command = NAND_CMD_READ0; - } - - write_cmd_reg(nand, command & 0xff); - - if (column != -1 || page_addr != -1) { - - if (column != -1) { - if (chip->options & NAND_BUSWIDTH_16 && - !nand_opcode_8bits(command)) - column >>= 1; - write_addr_reg(nand, column); - write_addr_reg(nand, column >> 8 | ENDADDR); - } - if (page_addr != -1) { - write_addr_reg(nand, page_addr); - - if (chip->options & NAND_ROW_ADDR_3) { - write_addr_reg(nand, page_addr >> 8); - write_addr_reg(nand, page_addr >> 16 | ENDADDR); - } else { - write_addr_reg(nand, page_addr >> 8 | ENDADDR); - } - } - } - - switch (command) { - case NAND_CMD_CACHEDPROG: - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - case NAND_CMD_ERASE2: - case NAND_CMD_SEQIN: - case NAND_CMD_RNDIN: - case NAND_CMD_STATUS: - return; - - case NAND_CMD_RESET: - if (chip->legacy.dev_ready) - break; - udelay(chip->legacy.chip_delay); - - write_cmd_reg(nand, NAND_CMD_STATUS); - write_cmd_reg(nand, command); - - while (!nuc900_check_rb(nand)) - ; - - return; - - case NAND_CMD_RNDOUT: - write_cmd_reg(nand, NAND_CMD_RNDOUTSTART); - return; - - case NAND_CMD_READ0: - write_cmd_reg(nand, NAND_CMD_READSTART); - /* fall through */ - - default: - - if (!chip->legacy.dev_ready) { - udelay(chip->legacy.chip_delay); - return; - } - } - - /* Apply this short delay always to ensure that we do wait tWB in - * any case on any machine. */ - ndelay(100); - - while (!chip->legacy.dev_ready(chip)) - ; -} - - -static void nuc900_nand_enable(struct nuc900_nand *nand) -{ - unsigned int val; - spin_lock(&nand->lock); - __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR)); - - val = __raw_readl(nand->reg + REG_FMICSR); - - if (!(val & NAND_EN)) - __raw_writel(val | NAND_EN, nand->reg + REG_FMICSR); - - val = __raw_readl(nand->reg + REG_SMCSR); - - val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS); - val |= WP; - - __raw_writel(val, nand->reg + REG_SMCSR); - - spin_unlock(&nand->lock); -} - -static int nuc900_nand_probe(struct platform_device *pdev) -{ - struct nuc900_nand *nuc900_nand; - struct nand_chip *chip; - struct mtd_info *mtd; - struct resource *res; - - nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand), - GFP_KERNEL); - if (!nuc900_nand) - return -ENOMEM; - chip = &(nuc900_nand->chip); - mtd = nand_to_mtd(chip); - - mtd->dev.parent = &pdev->dev; - spin_lock_init(&nuc900_nand->lock); - - nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(nuc900_nand->clk)) - return -ENOENT; - clk_enable(nuc900_nand->clk); - - chip->legacy.cmdfunc = nuc900_nand_command_lp; - chip->legacy.dev_ready = nuc900_nand_devready; - chip->legacy.read_byte = nuc900_nand_read_byte; - chip->legacy.write_buf = nuc900_nand_write_buf; - chip->legacy.read_buf = nuc900_nand_read_buf; - chip->legacy.chip_delay = 50; - chip->options = 0; - chip->ecc.mode = NAND_ECC_SOFT; - chip->ecc.algo = NAND_ECC_HAMMING; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(nuc900_nand->reg)) - return PTR_ERR(nuc900_nand->reg); - - nuc900_nand_enable(nuc900_nand); - - if (nand_scan(chip, 1)) - return -ENXIO; - - mtd_device_register(mtd, partitions, ARRAY_SIZE(partitions)); - - platform_set_drvdata(pdev, nuc900_nand); - 
- return 0; -} - -static int nuc900_nand_remove(struct platform_device *pdev) -{ - struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); - - nand_release(&nuc900_nand->chip); - clk_disable(nuc900_nand->clk); - - return 0; -} - -static struct platform_driver nuc900_nand_driver = { - .probe = nuc900_nand_probe, - .remove = nuc900_nand_remove, - .driver = { - .name = "nuc900-fmi", - }, -}; - -module_platform_driver(nuc900_nand_driver); - -MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); -MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:nuc900-fmi"); diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 8d881a28140e..ad77c112a78a 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -1501,7 +1501,7 @@ static int omap_elm_correct_data(struct nand_chip *chip, u_char *data, } /* Update number of correctable errors */ - stat += err_vec[i].error_count; + stat = max_t(unsigned int, stat, err_vec[i].error_count); /* Update page data with sector size */ data += ecc->size; @@ -1967,10 +1967,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip) case NAND_OMAP_PREFETCH_IRQ: info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0); - if (info->gpmc_irq_fifo <= 0) { - dev_err(dev, "Error getting fifo IRQ\n"); + if (info->gpmc_irq_fifo <= 0) return -ENODEV; - } err = devm_request_irq(dev, info->gpmc_irq_fifo, omap_nand_irq, IRQF_SHARED, "gpmc-nand-fifo", info); @@ -1982,10 +1980,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip) } info->gpmc_irq_count = platform_get_irq(info->pdev, 1); - if (info->gpmc_irq_count <= 0) { - dev_err(dev, "Error getting IRQ count\n"); + if (info->gpmc_irq_count <= 0) return -ENODEV; - } err = devm_request_irq(dev, info->gpmc_irq_count, omap_nand_irq, IRQF_SHARED, "gpmc-nand-count", info); diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index 30c51f772de6..c43cb4d92d3d 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -116,7 +116,7 @@ static int oxnas_nand_probe(struct platform_device *pdev) GFP_KERNEL); if (!chip) { err = -ENOMEM; - goto err_clk_unprepare; + goto err_release_child; } chip->controller = &oxnas->base; @@ -137,12 +137,12 @@ static int oxnas_nand_probe(struct platform_device *pdev) /* Scan to find existence of the device */ err = nand_scan(chip, 1); if (err) - goto err_clk_unprepare; + goto err_release_child; err = mtd_device_register(mtd, NULL, 0); if (err) { nand_release(chip); - goto err_clk_unprepare; + goto err_release_child; } oxnas->chips[nchips] = chip; @@ -159,6 +159,8 @@ static int oxnas_nand_probe(struct platform_device *pdev) return 0; +err_release_child: + of_node_put(nand_np); err_clk_unprepare: clk_disable_unprepare(oxnas->clk); return err; diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index dae0d235bb17..77774250fb11 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -998,7 +998,7 @@ static void r852_shutdown(struct pci_dev *pci_dev) #ifdef CONFIG_PM_SLEEP static int r852_suspend(struct device *device) { - struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); + struct r852_device *dev = dev_get_drvdata(device); if (dev->ctlreg & R852_CTL_CARDENABLE) return -EBUSY; @@ -1019,7 +1019,7 @@ static int r852_suspend(struct device *device) static int r852_resume(struct device *device) { - struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); + struct r852_device *dev = 
dev_get_drvdata(device); r852_disable_irqs(dev); r852_card_update_present(dev); diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index e509c93737c4..058e99d0cbcf 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -1129,10 +1129,8 @@ static int flctl_probe(struct platform_device *pdev) flctl->fifo = res->start + 0x24; /* FLDTFIFO */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl); diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index e63acc077c18..3ba73f18841f 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,6 +37,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) +#define FMC2_TIMEOUT_US 1000 #define FMC2_TIMEOUT_MS 1000 /* Timings */ @@ -53,6 +54,8 @@ #define FMC2_PMEM 0x88 #define FMC2_PATT 0x8c #define FMC2_HECCR 0x94 +#define FMC2_ISR 0x184 +#define FMC2_ICR 0x188 #define FMC2_CSQCR 0x200 #define FMC2_CSQCFGR1 0x204 #define FMC2_CSQCFGR2 0x208 @@ -118,6 +121,12 @@ #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a +/* Register: FMC2_ISR */ +#define FMC2_ISR_IHLF BIT(1) + +/* Register: FMC2_ICR */ +#define FMC2_ICR_CIHLF BIT(1) + /* Register: FMC2_CSQCR */ #define FMC2_CSQCR_CSQSTART BIT(0) @@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, stm32_fmc2_set_buswidth_16(fmc2, true); } +static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_sdr_timings *timings; + u32 isr, sr; + + /* Check that there are no pending requests to the NAND flash */ + if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + FMC2_TIMEOUT_US)) + dev_warn(fmc2->dev, "Waitrdy timeout\n"); + + /* Wait tWB before R/B# signal goes low */ + timings = nand_get_sdr_timings(&chip->data_interface); + ndelay(PSEC_TO_NSEC(timings->tWB_max)); + + /* R/B# signal is low, clear high level flag */ + writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + + /* Wait until R/B# signal is high */ + return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + isr, isr & FMC2_ISR_IHLF, + 5, 1000 * timeout_ms); +} + static int stm32_fmc2_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) @@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, break; case NAND_OP_WAITRDY_INSTR: - ret = nand_soft_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + ret = stm32_fmc2_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); break; } } @@ -1427,21 +1461,16 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, struct stm32_fmc2_timings *tims = &nand->timings; unsigned long hclk = clk_get_rate(fmc2->clk); unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); - int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att; - - tar = hclkp; - if (tar < sdrt->tAR_min) - tar = sdrt->tAR_min; - tims->tar = DIV_ROUND_UP(tar, hclkp) - 1; - if (tims->tar > FMC2_PCR_TIMING_MASK) - tims->tar = FMC2_PCR_TIMING_MASK; - - tclr = hclkp; - if (tclr < sdrt->tCLR_min) - tclr = sdrt->tCLR_min; - tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1; - if (tims->tclr > FMC2_PCR_TIMING_MASK) - tims->tclr =
FMC2_PCR_TIMING_MASK; + unsigned long timing, tar, tclr, thiz, twait; + unsigned long tset_mem, tset_att, thold_mem, thold_att; + + tar = max_t(unsigned long, hclkp, sdrt->tAR_min); + timing = DIV_ROUND_UP(tar, hclkp) - 1; + tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK); + + tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min); + timing = DIV_ROUND_UP(tclr, hclkp) - 1; + tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK); tims->thiz = FMC2_THIZ; thiz = (tims->thiz + 1) * hclkp; @@ -1451,18 +1480,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, * tWAIT > tWP * tWAIT > tREA + tIO */ - twait = hclkp; - if (twait < sdrt->tRP_min) - twait = sdrt->tRP_min; - if (twait < sdrt->tWP_min) - twait = sdrt->tWP_min; - if (twait < sdrt->tREA_max + FMC2_TIO) - twait = sdrt->tREA_max + FMC2_TIO; - tims->twait = DIV_ROUND_UP(twait, hclkp); - if (tims->twait == 0) - tims->twait = 1; - else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK) - tims->twait = FMC2_PMEM_PATT_TIMING_MASK; + twait = max_t(unsigned long, hclkp, sdrt->tRP_min); + twait = max_t(unsigned long, twait, sdrt->tWP_min); + twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO); + timing = DIV_ROUND_UP(twait, hclkp); + tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tSETUP_MEM > tCS - tWAIT @@ -1477,20 +1499,15 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if (twait > thiz && (sdrt->tDS_min > twait - thiz) && (tset_mem < sdrt->tDS_min - (twait - thiz))) tset_mem = sdrt->tDS_min - (twait - thiz); - tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp); - if (tims->tset_mem == 0) - tims->tset_mem = 1; - else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK) - tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(tset_mem, hclkp); + tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tHOLD_MEM > tCH * tHOLD_MEM > tREH - tSETUP_MEM * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT) */ - thold_mem = hclkp; - if (thold_mem < sdrt->tCH_min) - thold_mem = sdrt->tCH_min; + thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min); if (sdrt->tREH_min > tset_mem && (thold_mem < sdrt->tREH_min - tset_mem)) thold_mem = sdrt->tREH_min - tset_mem; @@ -1500,11 +1517,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if ((sdrt->tWC_min > tset_mem + twait) && (thold_mem < sdrt->tWC_min - (tset_mem + twait))) thold_mem = sdrt->tWC_min - (tset_mem + twait); - tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp); - if (tims->thold_mem == 0) - tims->thold_mem = 1; - else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK) - tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(thold_mem, hclkp); + tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tSETUP_ATT > tCS - tWAIT @@ -1526,11 +1540,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if (twait > thiz && (sdrt->tDS_min > twait - thiz) && (tset_att < sdrt->tDS_min - (twait - thiz))) tset_att = sdrt->tDS_min - (twait - thiz); - tims->tset_att = DIV_ROUND_UP(tset_att, hclkp); - if (tims->tset_att == 0) - tims->tset_att = 1; - else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK) - tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(tset_att, hclkp); + tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tHOLD_ATT > tALH @@ -1545,17 +1556,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT) * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT) */ - thold_att = hclkp; - 
if (thold_att < sdrt->tALH_min) - thold_att = sdrt->tALH_min; - if (thold_att < sdrt->tCH_min) - thold_att = sdrt->tCH_min; - if (thold_att < sdrt->tCLH_min) - thold_att = sdrt->tCLH_min; - if (thold_att < sdrt->tCOH_min) - thold_att = sdrt->tCOH_min; - if (thold_att < sdrt->tDH_min) - thold_att = sdrt->tDH_min; + thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min); if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) && (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem)) thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem; @@ -1574,11 +1579,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if ((sdrt->tWC_min > tset_att + twait) && (thold_att < sdrt->tWC_min - (tset_att + twait))) thold_att = sdrt->tWC_min - (tset_att + twait); - tims->thold_att = DIV_ROUND_UP(thold_att, hclkp); - if (tims->thold_att == 0) - tims->thold_att = 1; - else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK) - tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(thold_att, hclkp); + tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); } static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, @@ -1912,11 +1914,8 @@ static int stm32_fmc2_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - if (irq != -EPROBE_DEFER) - dev_err(dev, "IRQ error missing or invalid\n"); + if (irq < 0) return irq; - } ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, dev_name(dev), fmc2); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 89773293c64d..37a4ac0dd85b 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2071,10 +2071,8 @@ static int sunxi_nfc_probe(struct platform_device *pdev) return PTR_ERR(nfc->regs); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "failed to retrieve irq\n"); + if (irq < 0) return irq; - } nfc->ahb_clk = devm_clk_get(dev, "ahb"); if (IS_ERR(nfc->ahb_clk)) { diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index b3f2cabcc7c0..9acf2de37ee0 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -659,6 +659,7 @@ static int tango_nand_probe(struct platform_device *pdev) err = chip_init(&pdev->dev, np); if (err) { tango_nand_remove(pdev); + of_node_put(np); return err; } } diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index e4fe8c4bc711..6b399a75f9ae 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -862,6 +862,7 @@ static int vf610_nfc_probe(struct platform_device *pdev) dev_err(nfc->dev, "Only one NAND chip supported!\n"); err = -EINVAL; + of_node_put(child); goto err_disable_clk; } diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 1cb3760ff779..0db5ee4e82af 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -124,6 +124,16 @@ static const struct spinand_info toshiba_spinand_table[] = { 0, SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, tc58cxgxsx_ecc_get_status)), + /* 3.3V 4Gb */ + SPINAND_INFO("TC58CVG2S0", 0xED, + NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + 
&write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), /* 1.8V 1Gb */ SPINAND_INFO("TC58CYG0S3", 0xB2, NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),