Diffstat (limited to 'drivers/mtd/nand/davinci_nand.c')
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 342
1 file changed, 302 insertions(+), 40 deletions(-)
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f769b8a..0fad6487e6f4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
- * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
@@ -54,11 +54,14 @@
struct davinci_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
+ bool is_readmode;
+
void __iomem *base;
void __iomem *vaddr;
@@ -73,6 +76,7 @@ struct davinci_nand_info {
};
static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
@@ -218,6 +222,192 @@ static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
/*----------------------------------------------------------------------*/
/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ unsigned num_errors, corrected;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ return 0;
+ case 1: /* five or more errors detected */
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
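
The unrolled shift/mask code in nand_davinci_calculate_4bit() and nand_davinci_correct_4bit() above implements what amounts to a plain little-endian bitstream: 10-bit value i occupies bits [10*i, 10*i + 9] of the ten packed bytes, with bit n of the stream stored as bit (n % 8) of byte n / 8. Below is a minimal standalone sketch of that round trip; it assumes nothing beyond the bit layout read from the driver code, pack_ecc10/unpack_ecc10 are hypothetical names, and it is plain userspace C rather than driver code.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack eight 10-bit values into ten bytes, LSB-first, the same layout
 * produced by the unrolled code in nand_davinci_calculate_4bit().
 */
static void pack_ecc10(const uint16_t val[8], uint8_t out[10])
{
	unsigned i, bit, pos;

	memset(out, 0, 10);
	for (i = 0; i < 8; i++)
		for (bit = 0; bit < 10; bit++)
			if (val[i] & (1u << bit)) {
				pos = 10 * i + bit;
				out[pos / 8] |= 1u << (pos % 8);
			}
}

/* Inverse of the above, matching the ecc10[] unpacking done in
 * nand_davinci_correct_4bit().
 */
static void unpack_ecc10(const uint8_t in[10], uint16_t val[8])
{
	unsigned i, bit, pos;

	for (i = 0; i < 8; i++) {
		val[i] = 0;
		for (bit = 0; bit < 10; bit++) {
			pos = 10 * i + bit;
			if (in[pos / 8] & (1u << (pos % 8)))
				val[i] |= 1u << bit;
		}
	}
}

int main(void)
{
	uint16_t in[8] = { 0x3ff, 0x001, 0x155, 0x2aa, 0x000, 0x200, 0x0ff, 0x123 };
	uint16_t out[8];
	uint8_t packed[10];

	pack_ecc10(in, packed);
	unpack_ecc10(packed, out);
	assert(memcmp(in, out, sizeof(in)) == 0);
	return 0;
}
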
@@ -294,6 +484,23 @@ static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
/*----------------------------------------------------------------------*/
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+
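
Spelled out for a 512-byte-page chip with the usual 16-byte OOB, the layout above works out to:

	OOB bytes  0-4    ECC bytes 0-4
	OOB byte   5      factory bad block marker (left untouched)
	OOB bytes  6-7    ECC bytes 5-6
	OOB bytes  8-12   free (the default flash BBT pattern and its
	                  version byte fit here)
	OOB bytes 13-15   ECC bytes 7-9

The second oobfree entry only matters when the OOB is larger than 16 bytes; the probe code below fills in its length once the actual chip geometry is known.
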
static int __init nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
@@ -351,7 +562,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
- info->chip.options = pdata ? pdata->options : 0;
+ info->chip.options = pdata->options;
info->ioaddr = (uint32_t __force) vaddr;
@@ -360,14 +571,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- if (pdata && pdata->mask_ale)
- info->mask_ale = pdata->mask_cle;
- else
- info->mask_ale = MASK_ALE;
- if (pdata && pdata->mask_cle)
- info->mask_cle = pdata->mask_cle;
- else
- info->mask_cle = MASK_CLE;
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
- /* use board-specific ECC config; else, the best available */
- if (pdata)
- ecc_mode = pdata->ecc_mode;
- else
- ecc_mode = NAND_ECC_HW;
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+ ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 3;
break;
- case NAND_ECC_HW_SYNDROME:
- /* FIXME implement */
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 10;
-
- dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
- /* FALL THROUGH */
default:
ret = -EINVAL;
goto err_ecc;
@@ -441,12 +660,56 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+
+ /* For large page chips we'll be wanting to use a
+ * not-yet-implemented mode that reads OOB data
+ * before reading the body of the page, to avoid
+ * the "infix OOB" model of NAND_ECC_HW_SYNDROME
+ * (and preserve manufacturer badblock markings).
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for large page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
- const char *master_name;
-
- /* Set info->mtd.name = 0 temporarily */
- master_name = info->mtd.name;
- info->mtd.name = (char *)0;
-
- /* info->mtd.name == 0, means: don't bother checking
- <mtd-id> */
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
-
- /* Restore info->mtd.name */
- info->mtd.name = master_name;
}
- if (mtd_parts_nb <= 0 && pdata) {
+ if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
@@ -483,7 +735,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->partitioned = true;
}
- } else if (pdata && pdata->nr_parts) {
+ } else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
@@ -509,6 +761,11 @@ err_scan:
err_clk_enable:
clk_put(info->clk);
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
err_ecc:
err_clk:
err_ioremap:
@@ -532,6 +789,11 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
else
status = del_mtd_device(&info->mtd);
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
iounmap(info->base);
iounmap(info->vaddr);
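
To illustrate how a board would request the new mode, here is a hypothetical platform-data sketch; it is not part of this patch. The field names follow the pdata references in nand_davinci_probe() above, the struct itself comes from the DaVinci platform header, and the partition sizes are made up.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <mach/nand.h>		/* struct davinci_nand_pdata */

static struct mtd_partition board_nand_parts[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= 0x80000,		/* 512 KiB */
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	{
		.name		= "filesystem",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
	},
};

/* Request the 4-bit engine: probe() maps ecc_mode == NAND_ECC_HW with
 * ecc_bits == 4 onto the calculate/correct/hwctl hooks added here.
 */
static struct davinci_nand_pdata board_nand_pdata = {
	.parts		= board_nand_parts,
	.nr_parts	= ARRAY_SIZE(board_nand_parts),
	.ecc_mode	= NAND_ECC_HW,
	.ecc_bits	= 4,
	.options	= NAND_USE_FLASH_BBT,
};
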