Diffstat (limited to 'drivers/mmc/host')
33 files changed, 2915 insertions, 1485 deletions
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 54f91321749a..94df40531c38 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -311,7 +311,7 @@ config MMC_MSM config MMC_MXC tristate "Freescale i.MX2/3 Multimedia Card Interface support" - depends on ARCH_MXC + depends on MACH_MX21 || MACH_MX27 || ARCH_MX31 help This selects the Freescale i.MX2/3 Multimedia card Interface. If you have a i.MX platform with a Multimedia Card slot, @@ -319,6 +319,15 @@ config MMC_MXC If unsure, say N. +config MMC_MXS + tristate "Freescale MXS Multimedia Card Interface support" + depends on ARCH_MXS && MXS_DMA + help + This selects the Freescale SSP MMC controller found on MXS based + platforms like mx23/28. + + If unsure, say N. + config MMC_TIFM_SD tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" depends on EXPERIMENTAL && PCI @@ -430,13 +439,25 @@ config MMC_SDRICOH_CS To compile this driver as a module, choose M here: the module will be called sdricoh_cs. +config MMC_TMIO_CORE + tristate + config MMC_TMIO tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" - depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI + depends on MFD_TMIO || MFD_ASIC3 + select MMC_TMIO_CORE help This provides support for the SD/MMC cell found in TC6393XB, T7L66XB and also HTC ASIC3 +config MMC_SDHI + tristate "SH-Mobile SDHI SD/SDIO controller support" + depends on SUPERH || ARCH_SHMOBILE + select MMC_TMIO_CORE + help + This provides support for the SDHI SD/SDIO controller found in + SuperH and ARM SH-Mobile SoCs + config MMC_CB710 tristate "ENE CB710 MMC/SD Interface support" depends on PCI diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index e834fb223e9a..4f1df0aae574 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_IMX) += imxmmc.o obj-$(CONFIG_MMC_MXC) += mxcmmc.o +obj-$(CONFIG_MMC_MXS) += mxs-mmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o @@ -28,7 +29,13 @@ endif obj-$(CONFIG_MMC_S3C) += s3cmci.o obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o -obj-$(CONFIG_MMC_CB710) += cb710-mmc.o +obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o +tmio_mmc_core-y := tmio_mmc_pio.o +ifneq ($(CONFIG_MMC_SDHI),n) +tmio_mmc_core-y += tmio_mmc_dma.o +endif +obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o +obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o obj-$(CONFIG_MMC_DW) += dw_mmc.o diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index ad2a7a032cdf..ea3888b65d5d 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -127,7 +127,7 @@ struct atmel_mci_dma { * EVENT_DATA_COMPLETE is set in @pending_events, all data-related * interrupts must be disabled and @data_status updated with a * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the - * CMDRDY interupt must be disabled and @cmd_status updated with a + * CMDRDY interrupt must be disabled and @cmd_status updated with a * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the * bytes_xfered field of @data must be written. This is ensured by * using barriers. 
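The atmel-mci comment corrected above documents an ordering contract between the interrupt handler and the tasklet: the SR snapshot (cmd_status or data_status) must be written and made visible before the corresponding EVENT_* bit is set, which the driver enforces with memory barriers. A minimal sketch of that publish-then-flag pattern, using illustrative names rather than the driver's actual fields:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>

enum { EVENT_CMD_COMPLETE };

struct foo_host {
	unsigned long pending_events;
	u32 cmd_status;
	struct tasklet_struct tasklet;
};

static void foo_cmd_interrupt(struct foo_host *host, u32 sr)
{
	host->cmd_status = sr;			/* snapshot SR first */
	smp_wmb();				/* publish the snapshot before the event bit */
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);	/* consumer reads cmd_status after testing the bit */
}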
@@ -578,7 +578,8 @@ static void atmci_dma_cleanup(struct atmel_mci *host) struct mmc_data *data = host->data; if (data) - dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, + dma_unmap_sg(host->dma.chan->device->dev, + data->sg, data->sg_len, ((data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); } @@ -588,7 +589,7 @@ static void atmci_stop_dma(struct atmel_mci *host) struct dma_chan *chan = host->data_chan; if (chan) { - chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_terminate_all(chan); atmci_dma_cleanup(host); } else { /* Data transfer was stopped by the interrupt handler */ @@ -684,11 +685,11 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) else direction = DMA_TO_DEVICE; - sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); - if (sglen != data->sg_len) - goto unmap_exit; + sglen = dma_map_sg(chan->device->dev, data->sg, + data->sg_len, direction); + desc = chan->device->device_prep_slave_sg(chan, - data->sg, data->sg_len, direction, + data->sg, sglen, direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) goto unmap_exit; @@ -699,7 +700,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) return 0; unmap_exit: - dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); return -ENOMEM; } @@ -709,8 +710,8 @@ static void atmci_submit_data(struct atmel_mci *host) struct dma_async_tx_descriptor *desc = host->dma.data_desc; if (chan) { - desc->tx_submit(desc); - chan->device->device_issue_pending(chan); + dmaengine_submit(desc); + dma_async_issue_pending(chan); } } @@ -1081,7 +1082,7 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) /* * Update the MMC clock rate if necessary. This may be * necessary if set_ios() is called when a different slot is - * busy transfering data. + * busy transferring data. 
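The atmel-mci DMA hunks above move from calling the raw dmaengine driver hooks (device_control(), tx_submit(), device_issue_pending()) to the dmaengine wrapper helpers, and map the scatterlist against the DMA channel's device rather than the MMC platform device. A condensed sketch of the resulting prepare-and-submit path, with an illustrative function name and error handling trimmed:

#include <linux/errno.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>

static int foo_submit_dma(struct dma_chan *chan, struct mmc_data *data)
{
	enum dma_data_direction dir = (data->flags & MMC_DATA_WRITE) ?
				      DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct dma_async_tx_descriptor *desc;
	int sglen;

	/* map against the DMA controller's device, not the MMC platform device */
	sglen = dma_map_sg(chan->device->dev, data->sg, data->sg_len, dir);
	if (!sglen)
		return -ENOMEM;

	desc = chan->device->device_prep_slave_sg(chan, data->sg, sglen, dir,
						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
		return -ENOMEM;
	}

	dmaengine_submit(desc);			/* replaces desc->tx_submit(desc) */
	dma_async_issue_pending(chan);		/* replaces device_issue_pending() */
	return 0;
}

On the abort side, dmaengine_terminate_all(chan) is the wrapper equivalent of the old device_control(chan, DMA_TERMINATE_ALL, 0) call.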
*/ if (host->need_clock_update) { mci_writel(host, MR, host->mode_reg); diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 66b4ce587f4b..ce2a47b71dd6 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask) "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n", limit, mask, e, x); #endif - return 0; + return err; } static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 2fcc82577c1b..87e1f57ec9ba 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -32,6 +32,7 @@ #include <linux/mmc/mmc.h> #include <linux/mmc/dw_mmc.h> #include <linux/bitops.h> +#include <linux/regulator/consumer.h> #include "dw_mmc.h" @@ -315,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) /* Stop the IDMAC running */ temp = mci_readl(host, BMOD); - temp &= ~SDMMC_IDMAC_ENABLE; + temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); mci_writel(host, BMOD, temp); } @@ -384,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) /* Enable the IDMAC */ temp = mci_readl(host, BMOD); - temp |= SDMMC_IDMAC_ENABLE; + temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; mci_writel(host, BMOD, temp); /* Start it running */ @@ -562,7 +563,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); /* enable clock */ - mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE); + mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE | + SDMMC_CLKEN_LOW_PWR); /* inform CIU */ mci_send_cmd(slot, @@ -661,6 +663,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct dw_mci_slot *slot = mmc_priv(mmc); + u32 regs; /* set default 1 bit mode */ slot->ctype = SDMMC_CTYPE_1BIT; @@ -672,6 +675,16 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_BUS_WIDTH_4: slot->ctype = SDMMC_CTYPE_4BIT; break; + case MMC_BUS_WIDTH_8: + slot->ctype = SDMMC_CTYPE_8BIT; + break; + } + + /* DDR mode set */ + if (ios->ddr) { + regs = mci_readl(slot->host, UHS_REG); + regs |= (0x1 << slot->id) << 16; + mci_writel(slot->host, UHS_REG, regs); } if (ios->clock) { @@ -717,7 +730,9 @@ static int dw_mci_get_cd(struct mmc_host *mmc) struct dw_mci_board *brd = slot->host->pdata; /* Use platform get_cd function, else try onboard card detect */ - if (brd->get_cd) + if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) + present = 1; + else if (brd->get_cd) present = !brd->get_cd(slot->id); else present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) @@ -1019,13 +1034,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) struct mmc_data *data = host->data; int shift = host->data_shift; u32 status; - unsigned int nbytes = 0, len, old_len, count = 0; + unsigned int nbytes = 0, len; do { len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift; - if (count == 0) - old_len = len; - if (offset + len <= sg->length) { host->pull_data(host, (void *)(buf + offset), len); @@ -1070,7 +1082,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host) tasklet_schedule(&host->tasklet); return; } - count++; } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ len = SDMMC_GET_FCNT(mci_readl(host, STATUS)); host->pio_offset = offset; @@ -1395,7 +1406,11 @@ static int __init dw_mci_init_slot(struct dw_mci *host, 
unsigned int id) if (host->pdata->setpower) host->pdata->setpower(id, 0); - mmc->caps = 0; + if (host->pdata->caps) + mmc->caps = host->pdata->caps; + else + mmc->caps = 0; + if (host->pdata->get_bus_wd) if (host->pdata->get_bus_wd(slot->id) >= 4) mmc->caps |= MMC_CAP_4_BIT_DATA; @@ -1426,6 +1441,13 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) } #endif /* CONFIG_MMC_DW_IDMAC */ + host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); + if (IS_ERR(host->vmmc)) { + printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); + host->vmmc = NULL; + } else + regulator_enable(host->vmmc); + if (dw_mci_get_cd(mmc)) set_bit(DW_MMC_CARD_PRESENT, &slot->flags); else @@ -1441,6 +1463,12 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) /* Card initially undetected */ slot->last_detect_state = 0; + /* + * Card may have been plugged in prior to boot so we + * need to run the detect tasklet + */ + tasklet_schedule(&host->card_tasklet); + return 0; } @@ -1619,8 +1647,9 @@ static int dw_mci_probe(struct platform_device *pdev) */ fifo_size = mci_readl(host, FIFOTH); fifo_size = (fifo_size >> 16) & 0x7ff; - mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | - ((fifo_size/2) << 0))); + host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | + ((fifo_size/2) << 0)); + mci_writel(host, FIFOTH, host->fifoth_val); /* disable clock to CIU */ mci_writel(host, CLKENA, 0); @@ -1683,6 +1712,12 @@ err_dmaunmap: host->sg_cpu, host->sg_dma); iounmap(host->regs); + if (host->vmmc) { + regulator_disable(host->vmmc); + regulator_put(host->vmmc); + } + + err_freehost: kfree(host); return ret; @@ -1714,6 +1749,11 @@ static int __exit dw_mci_remove(struct platform_device *pdev) if (host->use_dma && host->dma_ops->exit) host->dma_ops->exit(host); + if (host->vmmc) { + regulator_disable(host->vmmc); + regulator_put(host->vmmc); + } + iounmap(host->regs); kfree(host); @@ -1729,6 +1769,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg) int i, ret; struct dw_mci *host = platform_get_drvdata(pdev); + if (host->vmmc) + regulator_enable(host->vmmc); + for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (!slot) @@ -1744,6 +1787,9 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg) } } + if (host->vmmc) + regulator_disable(host->vmmc); + return 0; } @@ -1752,6 +1798,23 @@ static int dw_mci_resume(struct platform_device *pdev) int i, ret; struct dw_mci *host = platform_get_drvdata(pdev); + if (host->dma_ops->init) + host->dma_ops->init(host); + + if (!mci_wait_reset(&pdev->dev, host)) { + ret = -ENODEV; + return ret; + } + + /* Restore the old value at FIFOTH register */ + mci_writel(host, FIFOTH, host->fifoth_val); + + mci_writel(host, RINTSTS, 0xFFFFFFFF); + mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | + SDMMC_INT_TXDR | SDMMC_INT_RXDR | + DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); + for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (!slot) diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 5dd55a75233d..23c662af5616 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -43,6 +43,7 @@ #define SDMMC_USRID 0x068 #define SDMMC_VERID 0x06c #define SDMMC_HCON 0x070 +#define SDMMC_UHS_REG 0x074 #define SDMMC_BMOD 0x080 #define SDMMC_PLDMND 0x084 #define SDMMC_DBADDR 0x088 @@ -51,7 +52,6 @@ #define SDMMC_DSCADDR 0x094 #define 
SDMMC_BUFADDR 0x098 #define SDMMC_DATA 0x100 -#define SDMMC_DATA_ADR 0x100 /* shift bit field */ #define _SBF(f, v) ((v) << (f)) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 2f7fc0c5146f..7c1e16aaf17f 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -99,7 +99,7 @@ #define r1b_timeout (HZ * 3) /* One of the critical speed parameters is the amount of data which may - * be transfered in one command. If this value is too low, the SD card + * be transferred in one command. If this value is too low, the SD card * controller has to do multiple partial block writes (argggh!). With * today (2008) SD cards there is little speed gain if we transfer more * than 64 KBytes at a time. So use this value until there is any indication diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 5bbb87d10251..b4a7e4fba90f 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -68,6 +68,12 @@ static struct variant_data variant_arm = { .datalength_bits = 16, }; +static struct variant_data variant_arm_extended_fifo = { + .fifosize = 128 * 4, + .fifohalfsize = 64 * 4, + .datalength_bits = 16, +}; + static struct variant_data variant_u300 = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4, @@ -1277,10 +1283,15 @@ static int mmci_resume(struct amba_device *dev) static struct amba_id mmci_ids[] = { { .id = 0x00041180, - .mask = 0x000fffff, + .mask = 0xff0fffff, .data = &variant_arm, }, { + .id = 0x01041180, + .mask = 0xff0fffff, + .data = &variant_arm_extended_fifo, + }, + { .id = 0x00041181, .mask = 0x000fffff, .data = &variant_arm, diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 97c9b3638d57..a4c865a5286b 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -267,14 +267,6 @@ msmsdcc_dma_complete_tlet(unsigned long data) dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents, host->dma.dir); - if (host->curr.user_pages) { - struct scatterlist *sg = host->dma.sg; - int i; - - for (i = 0; i < host->dma.num_ents; i++) - flush_dcache_page(sg_page(sg++)); - } - host->dma.sg = NULL; host->dma.busy = 0; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 4428594261c5..cc20e0259325 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -32,16 +32,14 @@ #include <linux/io.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> +#include <linux/dmaengine.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/sizes.h> #include <mach/mmc.h> -#ifdef CONFIG_ARCH_MX2 -#include <mach/dma-mx1-mx2.h> -#define HAS_DMA -#endif +#include <mach/dma.h> #define DRIVER_NAME "mxc-mmc" @@ -118,7 +116,8 @@ struct mxcmci_host { void __iomem *base; int irq; int detect_irq; - int dma; + struct dma_chan *dma; + struct dma_async_tx_descriptor *desc; int do_dma; int default_irq_mask; int use_sdio; @@ -129,7 +128,6 @@ struct mxcmci_host { struct mmc_command *cmd; struct mmc_data *data; - unsigned int dma_nents; unsigned int datasize; unsigned int dma_dir; @@ -144,6 +142,11 @@ struct mxcmci_host { spinlock_t lock; struct regulator *vcc; + + int burstlen; + int dmareq; + struct dma_slave_config dma_slave_config; + struct imx_dma_data dma_data; }; static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); @@ -206,17 +209,16 @@ static void mxcmci_softreset(struct mxcmci_host *host) writew(0xff, host->base + MMC_REG_RES_TO); } +static int mxcmci_setup_dma(struct mmc_host *mmc); static int mxcmci_setup_data(struct mxcmci_host *host, struct 
mmc_data *data) { unsigned int nob = data->blocks; unsigned int blksz = data->blksz; unsigned int datasize = nob * blksz; -#ifdef HAS_DMA struct scatterlist *sg; - int i; - int ret; -#endif + int i, nents; + if (data->flags & MMC_DATA_STREAM) nob = 0xffff; @@ -227,7 +229,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) writew(blksz, host->base + MMC_REG_BLK_LEN); host->datasize = datasize; -#ifdef HAS_DMA + if (!mxcmci_use_dma(host)) + return 0; + for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->offset & 3 || sg->length & 3) { host->do_dma = 0; @@ -235,34 +239,30 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) } } - if (data->flags & MMC_DATA_READ) { + if (data->flags & MMC_DATA_READ) host->dma_dir = DMA_FROM_DEVICE; - host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); - - ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, - datasize, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_READ); - } else { + else host->dma_dir = DMA_TO_DEVICE; - host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); - ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, - datasize, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_WRITE); - } + nents = dma_map_sg(host->dma->device->dev, data->sg, + data->sg_len, host->dma_dir); + if (nents != data->sg_len) + return -EINVAL; + + host->desc = host->dma->device->device_prep_slave_sg(host->dma, + data->sg, data->sg_len, host->dma_dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (ret) { - dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret); - return ret; + if (!host->desc) { + dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, + host->dma_dir); + host->do_dma = 0; + return 0; /* Fall back to PIO */ } wmb(); - imx_dma_enable(host->dma); -#endif /* HAS_DMA */ + dmaengine_submit(host->desc); + return 0; } @@ -337,13 +337,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) struct mmc_data *data = host->data; int data_error; -#ifdef HAS_DMA if (mxcmci_use_dma(host)) { - imx_dma_disable(host->dma); - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, + dmaengine_terminate_all(host->dma); + dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, host->dma_dir); } -#endif if (stat & STATUS_ERR_MASK) { dev_dbg(mmc_dev(host->mmc), "request failed. 
status: 0x%08x\n", @@ -545,7 +543,6 @@ static void mxcmci_datawork(struct work_struct *work) } } -#ifdef HAS_DMA static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) { struct mmc_data *data = host->data; @@ -568,7 +565,6 @@ static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) mxcmci_finish_request(host, host->req); } } -#endif /* HAS_DMA */ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) { @@ -606,12 +602,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; spin_unlock_irqrestore(&host->lock, flags); -#ifdef HAS_DMA if (mxcmci_use_dma(host) && (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, host->base + MMC_REG_STATUS); -#endif if (sdio_irq) { writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS); @@ -621,14 +615,14 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) if (stat & STATUS_END_CMD_RESP) mxcmci_cmd_done(host, stat); -#ifdef HAS_DMA if (mxcmci_use_dma(host) && (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) mxcmci_data_done(host, stat); -#endif + if (host->default_irq_mask && (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + return IRQ_HANDLED; } @@ -642,9 +636,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) host->req = req; host->cmdat &= ~CMD_DAT_CONT_INIT; -#ifdef HAS_DMA - host->do_dma = 1; -#endif + + if (host->dma) + host->do_dma = 1; + if (req->data) { error = mxcmci_setup_data(host, req->data); if (error) { @@ -660,6 +655,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) } error = mxcmci_start_cmd(host, req->cmd, cmdat); + out: if (error) mxcmci_finish_request(host, req); @@ -698,22 +694,46 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) prescaler, divider, clk_in, clk_ios); } +static int mxcmci_setup_dma(struct mmc_host *mmc) +{ + struct mxcmci_host *host = mmc_priv(mmc); + struct dma_slave_config *config = &host->dma_slave_config; + + config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS; + config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS; + config->dst_addr_width = 4; + config->src_addr_width = 4; + config->dst_maxburst = host->burstlen; + config->src_maxburst = host->burstlen; + + return dmaengine_slave_config(host->dma, config); +} + static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mxcmci_host *host = mmc_priv(mmc); -#ifdef HAS_DMA - unsigned int blen; + int burstlen, ret; + /* * use burstlen of 64 in 4 bit mode (--> reg value 0) * use burstlen of 16 in 1 bit mode (--> reg value 16) */ if (ios->bus_width == MMC_BUS_WIDTH_4) - blen = 0; + burstlen = 64; else - blen = 16; + burstlen = 16; + + if (mxcmci_use_dma(host) && burstlen != host->burstlen) { + host->burstlen = burstlen; + ret = mxcmci_setup_dma(mmc); + if (ret) { + dev_err(mmc_dev(host->mmc), + "failed to config DMA channel. 
Falling back to PIO\n"); + dma_release_channel(host->dma); + host->do_dma = 0; + } + } - imx_dma_config_burstlen(host->dma, blen); -#endif if (ios->bus_width == MMC_BUS_WIDTH_4) host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; else @@ -794,6 +814,18 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) host->caps |= MMC_CAP_4_BIT_DATA; } +static bool filter(struct dma_chan *chan, void *param) +{ + struct mxcmci_host *host = param; + + if (!imx_dma_is_general_purpose(chan)) + return false; + + chan->private = &host->dma_data; + + return true; +} + static const struct mmc_host_ops mxcmci_ops = { .request = mxcmci_request, .set_ios = mxcmci_set_ios, @@ -808,6 +840,7 @@ static int mxcmci_probe(struct platform_device *pdev) struct mxcmci_host *host = NULL; struct resource *iores, *r; int ret = 0, irq; + dma_cap_mask_t mask; printk(KERN_INFO "i.MX SDHC driver\n"); @@ -883,29 +916,23 @@ static int mxcmci_probe(struct platform_device *pdev) writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR); -#ifdef HAS_DMA - host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW); - if (host->dma < 0) { - dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); - ret = -EBUSY; - goto out_clk_put; - } - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (!r) { - ret = -EINVAL; - goto out_free_dma; + if (r) { + host->dmareq = r->start; + host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; + host->dma_data.priority = DMA_PRIO_LOW; + host->dma_data.dma_request = host->dmareq; + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma = dma_request_channel(mask, filter, host); + if (host->dma) + mmc->max_seg_size = dma_get_max_seg_size( + host->dma->device->dev); } - ret = imx_dma_config_channel(host->dma, - IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO, - IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, - r->start, 0); - if (ret) { - dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n"); - goto out_free_dma; - } -#endif + if (!host->dma) + dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n"); + INIT_WORK(&host->datawork, mxcmci_datawork); ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); @@ -928,9 +955,8 @@ static int mxcmci_probe(struct platform_device *pdev) out_free_irq: free_irq(host->irq, host); out_free_dma: -#ifdef HAS_DMA - imx_dma_free(host->dma); -#endif + if (host->dma) + dma_release_channel(host->dma); out_clk_put: clk_disable(host->clk); clk_put(host->clk); @@ -960,9 +986,10 @@ static int mxcmci_remove(struct platform_device *pdev) free_irq(host->irq, host); iounmap(host->base); -#ifdef HAS_DMA - imx_dma_free(host->dma); -#endif + + if (host->dma) + dma_release_channel(host->dma); + clk_disable(host->clk); clk_put(host->clk); diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c new file mode 100644 index 000000000000..99d39a6a1032 --- /dev/null +++ b/drivers/mmc/host/mxs-mmc.c @@ -0,0 +1,874 @@ +/* + * Portions copyright (C) 2003 Russell King, PXA MMCI Driver + * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver + * + * Copyright 2008 Embedded Alley Solutions, Inc. + * Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
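In the mxcmmc hunks above the legacy imx_dma_* interface is replaced by a generic dmaengine slave channel: probe builds a DMA_SLAVE capability mask and calls dma_request_channel() with a filter callback that attaches the i.MX-specific request data, and the FIFO address and burst length are programmed afterwards through dmaengine_slave_config(). A rough sketch of that request-and-configure sequence, assuming the controller-specific channel data is prepared by the caller (function names are illustrative):

#include <linux/types.h>
#include <linux/dmaengine.h>

static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	/* hand the controller-specific request data to the DMA driver */
	chan->private = param;
	return true;
}

static struct dma_chan *foo_request_dma(void *chan_data, dma_addr_t fifo_addr,
					unsigned int burstlen)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,
		.dst_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= burstlen,
		.dst_maxburst	= burstlen,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, foo_dma_filter, chan_data);
	if (chan && dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		chan = NULL;
	}
	return chan;
}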
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/highmem.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/completion.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/gpio.h> +#include <linux/regulator/consumer.h> + +#include <mach/mxs.h> +#include <mach/common.h> +#include <mach/dma.h> +#include <mach/mmc.h> + +#define DRIVER_NAME "mxs-mmc" + +/* card detect polling timeout */ +#define MXS_MMC_DETECT_TIMEOUT (HZ/2) + +#define SSP_VERSION_LATEST 4 +#define ssp_is_old() (host->version < SSP_VERSION_LATEST) + +/* SSP registers */ +#define HW_SSP_CTRL0 0x000 +#define BM_SSP_CTRL0_RUN (1 << 29) +#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28) +#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26) +#define BM_SSP_CTRL0_READ (1 << 25) +#define BM_SSP_CTRL0_DATA_XFER (1 << 24) +#define BP_SSP_CTRL0_BUS_WIDTH (22) +#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22) +#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21) +#define BM_SSP_CTRL0_LONG_RESP (1 << 19) +#define BM_SSP_CTRL0_GET_RESP (1 << 17) +#define BM_SSP_CTRL0_ENABLE (1 << 16) +#define BP_SSP_CTRL0_XFER_COUNT (0) +#define BM_SSP_CTRL0_XFER_COUNT (0xffff) +#define HW_SSP_CMD0 0x010 +#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25) +#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22) +#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21) +#define BM_SSP_CMD0_APPEND_8CYC (1 << 20) +#define BP_SSP_CMD0_BLOCK_SIZE (16) +#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16) +#define BP_SSP_CMD0_BLOCK_COUNT (8) +#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8) +#define BP_SSP_CMD0_CMD (0) +#define BM_SSP_CMD0_CMD (0xff) +#define HW_SSP_CMD1 0x020 +#define HW_SSP_XFER_SIZE 0x030 +#define HW_SSP_BLOCK_SIZE 0x040 +#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4) +#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) +#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0) +#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf) +#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070) +#define BP_SSP_TIMING_TIMEOUT (16) +#define BM_SSP_TIMING_TIMEOUT (0xffff << 16) +#define BP_SSP_TIMING_CLOCK_DIVIDE (8) +#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) +#define BP_SSP_TIMING_CLOCK_RATE (0) +#define BM_SSP_TIMING_CLOCK_RATE (0xff) +#define HW_SSP_CTRL1 (ssp_is_old() ? 
0x060 : 0x080) +#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) +#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) +#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) +#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28) +#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27) +#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26) +#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25) +#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24) +#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23) +#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22) +#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21) +#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20) +#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17) +#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16) +#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15) +#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14) +#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13) +#define BM_SSP_CTRL1_POLARITY (1 << 9) +#define BP_SSP_CTRL1_WORD_LENGTH (4) +#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) +#define BP_SSP_CTRL1_SSP_MODE (0) +#define BM_SSP_CTRL1_SSP_MODE (0xf) +#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0) +#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0) +#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0) +#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0) +#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100) +#define BM_SSP_STATUS_CARD_DETECT (1 << 28) +#define BM_SSP_STATUS_SDIO_IRQ (1 << 17) +#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130) +#define BP_SSP_VERSION_MAJOR (24) + +#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) + +#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ + BM_SSP_CTRL1_RESP_ERR_IRQ | \ + BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ + BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \ + BM_SSP_CTRL1_DATA_CRC_IRQ | \ + BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \ + BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ + BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) + +#define SSP_PIO_NUM 3 + +struct mxs_mmc_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + void __iomem *base; + int irq; + struct resource *res; + struct resource *dma_res; + struct clk *clk; + unsigned int clk_rate; + + struct dma_chan *dmach; + struct mxs_dma_data dma_data; + unsigned int dma_dir; + u32 ssp_pio_words[SSP_PIO_NUM]; + + unsigned int version; + unsigned char bus_width; + spinlock_t lock; + int sdio_irq_en; +}; + +static int mxs_mmc_get_ro(struct mmc_host *mmc) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_mmc_platform_data *pdata = + mmc_dev(host->mmc)->platform_data; + + if (!pdata) + return -EFAULT; + + if (!gpio_is_valid(pdata->wp_gpio)) + return -EINVAL; + + return gpio_get_value(pdata->wp_gpio); +} + +static int mxs_mmc_get_cd(struct mmc_host *mmc) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + + return !(readl(host->base + HW_SSP_STATUS) & + BM_SSP_STATUS_CARD_DETECT); +} + +static void mxs_mmc_reset(struct mxs_mmc_host *host) +{ + u32 ctrl0, ctrl1; + + mxs_reset_block(host->base); + + ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; + ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | + BF_SSP(0x7, CTRL1_WORD_LENGTH) | + BM_SSP_CTRL1_DMA_ENABLE | + BM_SSP_CTRL1_POLARITY | + BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN | + BM_SSP_CTRL1_DATA_CRC_IRQ_EN | + BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN | + BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN | + BM_SSP_CTRL1_RESP_ERR_IRQ_EN; + + writel(BF_SSP(0xffff, TIMING_TIMEOUT) | + BF_SSP(2, TIMING_CLOCK_DIVIDE) | + BF_SSP(0, TIMING_CLOCK_RATE), + host->base + HW_SSP_TIMING); + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; + 
} + + writel(ctrl0, host->base + HW_SSP_CTRL0); + writel(ctrl1, host->base + HW_SSP_CTRL1); +} + +static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, + struct mmc_command *cmd); + +static void mxs_mmc_request_done(struct mxs_mmc_host *host) +{ + struct mmc_command *cmd = host->cmd; + struct mmc_data *data = host->data; + struct mmc_request *mrq = host->mrq; + + if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { + if (mmc_resp_type(cmd) & MMC_RSP_136) { + cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0); + cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1); + cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2); + cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3); + } else { + cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0); + } + } + + if (data) { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, host->dma_dir); + /* + * If there was an error on any block, we mark all + * data blocks as being in error. + */ + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + host->data = NULL; + if (mrq->stop) { + mxs_mmc_start_cmd(host, mrq->stop); + return; + } + } + + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + +static void mxs_mmc_dma_irq_callback(void *param) +{ + struct mxs_mmc_host *host = param; + + mxs_mmc_request_done(host); +} + +static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) +{ + struct mxs_mmc_host *host = dev_id; + struct mmc_command *cmd = host->cmd; + struct mmc_data *data = host->data; + u32 stat; + + spin_lock(&host->lock); + + stat = readl(host->base + HW_SSP_CTRL1); + writel(stat & MXS_MMC_IRQ_BITS, + host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); + + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) + mmc_signal_sdio_irq(host->mmc); + + spin_unlock(&host->lock); + + if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) + cmd->error = -ETIMEDOUT; + else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) + cmd->error = -EIO; + + if (data) { + if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | + BM_SSP_CTRL1_RECV_TIMEOUT_IRQ)) + data->error = -ETIMEDOUT; + else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ) + data->error = -EILSEQ; + else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | + BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)) + data->error = -EIO; + } + + return IRQ_HANDLED; +} + +static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( + struct mxs_mmc_host *host, unsigned int append) +{ + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = host->data; + struct scatterlist * sgl; + unsigned int sg_len; + + if (data) { + /* data */ + dma_map_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, host->dma_dir); + sgl = data->sg; + sg_len = data->sg_len; + } else { + /* pio */ + sgl = (struct scatterlist *) host->ssp_pio_words; + sg_len = SSP_PIO_NUM; + } + + desc = host->dmach->device->device_prep_slave_sg(host->dmach, + sgl, sg_len, host->dma_dir, append); + if (desc) { + desc->callback = mxs_mmc_dma_irq_callback; + desc->callback_param = host; + } else { + if (data) + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, host->dma_dir); + } + + return desc; +} + +static void mxs_mmc_bc(struct mxs_mmc_host *host) +{ + struct mmc_command *cmd = host->cmd; + struct dma_async_tx_descriptor *desc; + u32 ctrl0, cmd0, cmd1; + + ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC; + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC; + cmd1 = cmd->arg; + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; + } + + 
host->ssp_pio_words[0] = ctrl0; + host->ssp_pio_words[1] = cmd0; + host->ssp_pio_words[2] = cmd1; + host->dma_dir = DMA_NONE; + desc = mxs_mmc_prep_dma(host, 0); + if (!desc) + goto out; + + dmaengine_submit(desc); + return; + +out: + dev_warn(mmc_dev(host->mmc), + "%s: failed to prep dma\n", __func__); +} + +static void mxs_mmc_ac(struct mxs_mmc_host *host) +{ + struct mmc_command *cmd = host->cmd; + struct dma_async_tx_descriptor *desc; + u32 ignore_crc, get_resp, long_resp; + u32 ctrl0, cmd0, cmd1; + + ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? + 0 : BM_SSP_CTRL0_IGNORE_CRC; + get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? + BM_SSP_CTRL0_GET_RESP : 0; + long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? + BM_SSP_CTRL0_LONG_RESP : 0; + + ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp; + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + cmd1 = cmd->arg; + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; + } + + host->ssp_pio_words[0] = ctrl0; + host->ssp_pio_words[1] = cmd0; + host->ssp_pio_words[2] = cmd1; + host->dma_dir = DMA_NONE; + desc = mxs_mmc_prep_dma(host, 0); + if (!desc) + goto out; + + dmaengine_submit(desc); + return; + +out: + dev_warn(mmc_dev(host->mmc), + "%s: failed to prep dma\n", __func__); +} + +static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns) +{ + const unsigned int ssp_timeout_mul = 4096; + /* + * Calculate ticks in ms since ns are large numbers + * and might overflow + */ + const unsigned int clock_per_ms = clock_rate / 1000; + const unsigned int ms = ns / 1000; + const unsigned int ticks = ms * clock_per_ms; + const unsigned int ssp_ticks = ticks / ssp_timeout_mul; + + WARN_ON(ssp_ticks == 0); + return ssp_ticks; +} + +static void mxs_mmc_adtc(struct mxs_mmc_host *host) +{ + struct mmc_command *cmd = host->cmd; + struct mmc_data *data = cmd->data; + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl = data->sg, *sg; + unsigned int sg_len = data->sg_len; + int i; + + unsigned short dma_data_dir, timeout; + unsigned int data_size = 0, log2_blksz; + unsigned int blocks = data->blocks; + + u32 ignore_crc, get_resp, long_resp, read; + u32 ctrl0, cmd0, cmd1, val; + + ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? + 0 : BM_SSP_CTRL0_IGNORE_CRC; + get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? + BM_SSP_CTRL0_GET_RESP : 0; + long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 
+ BM_SSP_CTRL0_LONG_RESP : 0; + + if (data->flags & MMC_DATA_WRITE) { + dma_data_dir = DMA_TO_DEVICE; + read = 0; + } else { + dma_data_dir = DMA_FROM_DEVICE; + read = BM_SSP_CTRL0_READ; + } + + ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) | + ignore_crc | get_resp | long_resp | + BM_SSP_CTRL0_DATA_XFER | read | + BM_SSP_CTRL0_WAIT_FOR_IRQ | + BM_SSP_CTRL0_ENABLE; + + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + + /* get logarithm to base 2 of block size for setting register */ + log2_blksz = ilog2(data->blksz); + + /* + * take special care of the case that data size from data->sg + * is not equal to blocks x blksz + */ + for_each_sg(sgl, sg, sg_len, i) + data_size += sg->length; + + if (data_size != data->blocks * data->blksz) + blocks = 1; + + /* xfer count, block size and count need to be set differently */ + if (ssp_is_old()) { + ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); + cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | + BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); + } else { + writel(data_size, host->base + HW_SSP_XFER_SIZE); + writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | + BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), + host->base + HW_SSP_BLOCK_SIZE); + } + + if ((cmd->opcode == MMC_STOP_TRANSMISSION) || + (cmd->opcode == SD_IO_RW_EXTENDED)) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + + cmd1 = cmd->arg; + + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; + } + + /* set the timeout count */ + timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns); + val = readl(host->base + HW_SSP_TIMING); + val &= ~(BM_SSP_TIMING_TIMEOUT); + val |= BF_SSP(timeout, TIMING_TIMEOUT); + writel(val, host->base + HW_SSP_TIMING); + + /* pio */ + host->ssp_pio_words[0] = ctrl0; + host->ssp_pio_words[1] = cmd0; + host->ssp_pio_words[2] = cmd1; + host->dma_dir = DMA_NONE; + desc = mxs_mmc_prep_dma(host, 0); + if (!desc) + goto out; + + /* append data sg */ + WARN_ON(host->data != NULL); + host->data = data; + host->dma_dir = dma_data_dir; + desc = mxs_mmc_prep_dma(host, 1); + if (!desc) + goto out; + + dmaengine_submit(desc); + return; +out: + dev_warn(mmc_dev(host->mmc), + "%s: failed to prep dma\n", __func__); +} + +static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, + struct mmc_command *cmd) +{ + host->cmd = cmd; + + switch (mmc_cmd_type(cmd)) { + case MMC_CMD_BC: + mxs_mmc_bc(host); + break; + case MMC_CMD_BCR: + mxs_mmc_ac(host); + break; + case MMC_CMD_AC: + mxs_mmc_ac(host); + break; + case MMC_CMD_ADTC: + mxs_mmc_adtc(host); + break; + default: + dev_warn(mmc_dev(host->mmc), + "%s: unknown MMC command\n", __func__); + break; + } +} + +static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + + WARN_ON(host->mrq != NULL); + host->mrq = mrq; + mxs_mmc_start_cmd(host, mrq->cmd); +} + +static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) +{ + unsigned int ssp_rate, bit_rate; + u32 div1, div2; + u32 val; + + ssp_rate = clk_get_rate(host->clk); + + for (div1 = 2; div1 < 254; div1 += 2) { + div2 = ssp_rate / rate / div1; + if (div2 < 0x100) + break; + } + + if (div1 >= 254) { + dev_err(mmc_dev(host->mmc), + "%s: cannot set clock to %d\n", __func__, rate); + return; + } + + if (div2 == 0) + bit_rate = ssp_rate / div1; + else + bit_rate = ssp_rate / div1 / div2; + + val = readl(host->base + HW_SSP_TIMING); + val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); + val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE); + val |= 
BF_SSP(div2 - 1, TIMING_CLOCK_RATE); + writel(val, host->base + HW_SSP_TIMING); + + host->clk_rate = bit_rate; + + dev_dbg(mmc_dev(host->mmc), + "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n", + __func__, div1, div2, ssp_rate, bit_rate, rate); +} + +static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + + if (ios->bus_width == MMC_BUS_WIDTH_8) + host->bus_width = 2; + else if (ios->bus_width == MMC_BUS_WIDTH_4) + host->bus_width = 1; + else + host->bus_width = 0; + + if (ios->clock) + mxs_mmc_set_clk_rate(host, ios->clock); +} + +static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mxs_mmc_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + host->sdio_irq_en = enable; + + if (enable) { + writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, + host->base + HW_SSP_CTRL0 + MXS_SET_ADDR); + writel(BM_SSP_CTRL1_SDIO_IRQ_EN, + host->base + HW_SSP_CTRL1 + MXS_SET_ADDR); + + if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + + } else { + writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, + host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR); + writel(BM_SSP_CTRL1_SDIO_IRQ_EN, + host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + +static const struct mmc_host_ops mxs_mmc_ops = { + .request = mxs_mmc_request, + .get_ro = mxs_mmc_get_ro, + .get_cd = mxs_mmc_get_cd, + .set_ios = mxs_mmc_set_ios, + .enable_sdio_irq = mxs_mmc_enable_sdio_irq, +}; + +static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param) +{ + struct mxs_mmc_host *host = param; + + if (!mxs_dma_is_apbh(chan)) + return false; + + if (chan->chan_id != host->dma_res->start) + return false; + + chan->private = &host->dma_data; + + return true; +} + +static int mxs_mmc_probe(struct platform_device *pdev) +{ + struct mxs_mmc_host *host; + struct mmc_host *mmc; + struct resource *iores, *dmares, *r; + struct mxs_mmc_platform_data *pdata; + int ret = 0, irq_err, irq_dma; + dma_cap_mask_t mask; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); + irq_err = platform_get_irq(pdev, 0); + irq_dma = platform_get_irq(pdev, 1); + if (!iores || !dmares || irq_err < 0 || irq_dma < 0) + return -EINVAL; + + r = request_mem_region(iores->start, resource_size(iores), pdev->name); + if (!r) + return -EBUSY; + + mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto out_release_mem; + } + + host = mmc_priv(mmc); + host->base = ioremap(r->start, resource_size(r)); + if (!host->base) { + ret = -ENOMEM; + goto out_mmc_free; + } + + /* only major verion does matter */ + host->version = readl(host->base + HW_SSP_VERSION) >> + BP_SSP_VERSION_MAJOR; + + host->mmc = mmc; + host->res = r; + host->dma_res = dmares; + host->irq = irq_err; + host->sdio_irq_en = 0; + + host->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto out_iounmap; + } + clk_enable(host->clk); + + mxs_mmc_reset(host); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma_data.chan_irq = irq_dma; + host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host); + if (!host->dmach) { + dev_err(mmc_dev(host->mmc), + "%s: failed to request dma\n", __func__); + goto out_clk_put; + } + + /* set mmc core parameters */ + mmc->ops = &mxs_mmc_ops; + mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SDIO_IRQ | 
MMC_CAP_NEEDS_POLL; + + pdata = mmc_dev(host->mmc)->platform_data; + if (pdata) { + if (pdata->flags & SLOTF_8_BIT_CAPABLE) + mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; + if (pdata->flags & SLOTF_4_BIT_CAPABLE) + mmc->caps |= MMC_CAP_4_BIT_DATA; + } + + mmc->f_min = 400000; + mmc->f_max = 288000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + mmc->max_segs = 52; + mmc->max_blk_size = 1 << 0xf; + mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff; + mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff; + mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev); + + platform_set_drvdata(pdev, mmc); + + ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host); + if (ret) + goto out_free_dma; + + spin_lock_init(&host->lock); + + ret = mmc_add_host(mmc); + if (ret) + goto out_free_irq; + + dev_info(mmc_dev(host->mmc), "initialized\n"); + + return 0; + +out_free_irq: + free_irq(host->irq, host); +out_free_dma: + if (host->dmach) + dma_release_channel(host->dmach); +out_clk_put: + clk_disable(host->clk); + clk_put(host->clk); +out_iounmap: + iounmap(host->base); +out_mmc_free: + mmc_free_host(mmc); +out_release_mem: + release_mem_region(iores->start, resource_size(iores)); + return ret; +} + +static int mxs_mmc_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct mxs_mmc_host *host = mmc_priv(mmc); + struct resource *res = host->res; + + mmc_remove_host(mmc); + + free_irq(host->irq, host); + + platform_set_drvdata(pdev, NULL); + + if (host->dmach) + dma_release_channel(host->dmach); + + clk_disable(host->clk); + clk_put(host->clk); + + iounmap(host->base); + + mmc_free_host(mmc); + + release_mem_region(res->start, resource_size(res)); + + return 0; +} + +#ifdef CONFIG_PM +static int mxs_mmc_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct mxs_mmc_host *host = mmc_priv(mmc); + int ret = 0; + + ret = mmc_suspend_host(mmc); + + clk_disable(host->clk); + + return ret; +} + +static int mxs_mmc_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct mxs_mmc_host *host = mmc_priv(mmc); + int ret = 0; + + clk_enable(host->clk); + + ret = mmc_resume_host(mmc); + + return ret; +} + +static const struct dev_pm_ops mxs_mmc_pm_ops = { + .suspend = mxs_mmc_suspend, + .resume = mxs_mmc_resume, +}; +#endif + +static struct platform_driver mxs_mmc_driver = { + .probe = mxs_mmc_probe, + .remove = mxs_mmc_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &mxs_mmc_pm_ops, +#endif + }, +}; + +static int __init mxs_mmc_init(void) +{ + return platform_driver_register(&mxs_mmc_driver); +} + +static void __exit mxs_mmc_exit(void) +{ + platform_driver_unregister(&mxs_mmc_driver); +} + +module_init(mxs_mmc_init); +module_exit(mxs_mmc_exit); + +MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); +MODULE_AUTHOR("Freescale Semiconductor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 5530def54e5b..e2aecb7f1d5c 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -15,9 +15,11 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> +#include <linux/irq.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/of_irq.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/core.h> diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 
379d2ffe4c87..a6c329040140 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -832,7 +832,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) return IRQ_HANDLED; } - if (end_command) + if (end_command && host->cmd) mmc_omap_cmd_done(host, host->cmd); if (host->data != NULL) { if (transfer_error) @@ -1417,7 +1417,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev) if (res == NULL || irq < 0) return -ENXIO; - res = request_mem_region(res->start, res->end - res->start + 1, + res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) return -EBUSY; @@ -1457,7 +1457,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev) host->irq = irq; host->phys_base = host->mem_res->start; - host->virt_base = ioremap(res->start, res->end - res->start + 1); + host->virt_base = ioremap(res->start, resource_size(res)); if (!host->virt_base) goto err_ioremap; @@ -1514,7 +1514,7 @@ err_free_mmc_host: err_ioremap: kfree(host); err_free_mem_region: - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); return ret; } diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 158c0ee53b2c..259ece047afc 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2047,8 +2047,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) res->start += pdata->reg_offset; res->end += pdata->reg_offset; - res = request_mem_region(res->start, res->end - res->start + 1, - pdev->name); + res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) return -EBUSY; @@ -2287,7 +2286,7 @@ err1: err_alloc: omap_hsmmc_gpio_free(pdata); err: - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); return ret; } @@ -2324,7 +2323,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); return 0; diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 1ccd4b256cee..a04f87d7ee3d 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -874,7 +874,7 @@ static void finalize_request(struct s3cmci_host *host) if (!mrq->data) goto request_done; - /* Calulate the amout of bytes transfer if there was no error */ + /* Calculate the amout of bytes transfer if there was no error */ if (mrq->data->error == 0) { mrq->data->bytes_xfered = (mrq->data->blocks * mrq->data->blksz); @@ -882,7 +882,7 @@ static void finalize_request(struct s3cmci_host *host) mrq->data->bytes_xfered = 0; } - /* If we had an error while transfering data we flush the + /* If we had an error while transferring data we flush the * DMA channel and the fifo to clear out any garbage. */ if (mrq->data->error != 0) { if (s3cmci_host_usedma(host)) @@ -980,7 +980,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data) if ((data->blksz & 3) != 0) { /* We cannot deal with unaligned blocks with more than - * one block being transfered. */ + * one block being transferred. 
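The omap.c and omap_hsmmc.c hunks above replace the open-coded res->end - res->start + 1 with resource_size(), which computes exactly that length from a struct resource. A typical claim-and-map sequence using it, for reference:

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_resource(struct platform_device *pdev,
				      struct resource *res)
{
	/* resource_size(res) == res->end - res->start + 1 */
	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return NULL;

	return ioremap(res->start, resource_size(res));
}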
*/ if (data->blocks > 1) { pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 9b82910b9dbb..a19967d0bfc4 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -15,13 +15,41 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> +#include <linux/gpio.h> +#include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-pltfm.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> #include <mach/hardware.h> +#include <mach/esdhc.h> #include "sdhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" +/* VENDOR SPEC register */ +#define SDHCI_VENDOR_SPEC 0xC0 +#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 + +#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) +/* + * The CMDTYPE of the CMD register (offset 0xE) should be set to + * "11" when the STOP CMD12 is issued on imx53 to abort one + * open ended multi-blk IO. Otherwise the TC INT wouldn't + * be generated. + * In exact block transfer, the controller doesn't complete the + * operations automatically as required at the end of the + * transfer and remains on hold if the abort command is not sent. + * As a result, the TC flag is not asserted and SW received timeout + * exeception. Bit1 of Vendor Spec registor is used to fix it. + */ +#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) + +struct pltfm_imx_data { + int flags; + u32 scratchpad; +}; + static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) { void __iomem *base = host->ioaddr + (reg & ~0x3); @@ -30,6 +58,56 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); } +static u32 esdhc_readl_le(struct sdhci_host *host, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + + /* fake CARD_PRESENT flag on mx25/35 */ + u32 val = readl(host->ioaddr + reg); + + if (unlikely((reg == SDHCI_PRESENT_STATE) + && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { + struct esdhc_platform_data *boarddata = + host->mmc->parent->platform_data; + + if (boarddata && gpio_is_valid(boarddata->cd_gpio) + && gpio_get_value(boarddata->cd_gpio)) + /* no card, if a valid gpio says so... */ + val &= SDHCI_CARD_PRESENT; + else + /* ... 
in all other cases assume card is present */ + val |= SDHCI_CARD_PRESENT; + } + + return val; +} + +static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + + if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) + && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) + /* + * these interrupts won't work with a custom card_detect gpio + * (only applied to mx25/35) + */ + val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + + if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + && (reg == SDHCI_INT_STATUS) + && (val & SDHCI_INT_DATA_END))) { + u32 v; + v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); + v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; + writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); + } + + writel(val, host->ioaddr + reg); +} + static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { if (unlikely(reg == SDHCI_HOST_VERSION)) @@ -41,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; switch (reg) { case SDHCI_TRANSFER_MODE: @@ -48,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) * Postpone this write, we must do it together with a * command write that is down below. */ - pltfm_host->scratchpad = val; + if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + && (host->cmd->opcode == SD_IO_RW_EXTENDED) + && (host->cmd->data->blocks > 1) + && (host->cmd->data->flags & MMC_DATA_READ)) { + u32 v; + v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); + v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; + writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); + } + imx_data->scratchpad = val; return; case SDHCI_COMMAND: - writel(val << 16 | pltfm_host->scratchpad, + if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) + && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + val |= SDHCI_CMD_ABORTCMD; + writel(val << 16 | imx_data->scratchpad, host->ioaddr + SDHCI_TRANSFER_MODE); return; case SDHCI_BLOCK_SIZE: @@ -100,10 +191,42 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) return clk_get_rate(pltfm_host->clk) / 256 / 16; } +static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) +{ + struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; + + if (boarddata && gpio_is_valid(boarddata->wp_gpio)) + return gpio_get_value(boarddata->wp_gpio); + else + return -ENOSYS; +} + +static struct sdhci_ops sdhci_esdhc_ops = { + .read_l = esdhc_readl_le, + .read_w = esdhc_readw_le, + .write_l = esdhc_writel_le, + .write_w = esdhc_writew_le, + .write_b = esdhc_writeb_le, + .set_clock = esdhc_set_clock, + .get_max_clock = esdhc_pltfm_get_max_clock, + .get_min_clock = esdhc_pltfm_get_min_clock, +}; + +static irqreturn_t cd_irq(int irq, void *data) +{ + struct sdhci_host *sdhost = (struct sdhci_host *)data; + + tasklet_schedule(&sdhost->card_tasklet); + return IRQ_HANDLED; +}; + static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct clk *clk; + int err; + struct pltfm_imx_data *imx_data; clk = clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { @@ -113,35 +236,94 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd 
clk_enable(clk); pltfm_host->clk = clk; - if (cpu_is_mx35() || cpu_is_mx51()) + imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); + if (!imx_data) { + clk_disable(pltfm_host->clk); + clk_put(pltfm_host->clk); + return -ENOMEM; + } + pltfm_host->priv = imx_data; + + if (!cpu_is_mx25()) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */ - if (cpu_is_mx25() || cpu_is_mx35()) + if (cpu_is_mx25() || cpu_is_mx35()) { + /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; + /* write_protect can't be routed to controller, use gpio */ + sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; + } + + if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) + imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; + + if (boarddata) { + err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); + if (err) { + dev_warn(mmc_dev(host->mmc), + "no write-protect pin available!\n"); + boarddata->wp_gpio = err; + } + err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); + if (err) { + dev_warn(mmc_dev(host->mmc), + "no card-detect pin available!\n"); + goto no_card_detect_pin; + } + + /* i.MX5x has issues to be researched */ + if (!cpu_is_mx25() && !cpu_is_mx35()) + goto not_supported; + + err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + mmc_hostname(host->mmc), host); + if (err) { + dev_warn(mmc_dev(host->mmc), "request irq error\n"); + goto no_card_detect_irq; + } + + imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; + /* Now we have a working card_detect again */ + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + } + + return 0; + + no_card_detect_irq: + gpio_free(boarddata->cd_gpio); + no_card_detect_pin: + boarddata->cd_gpio = err; + not_supported: + kfree(imx_data); return 0; } static void esdhc_pltfm_exit(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; + struct pltfm_imx_data *imx_data = pltfm_host->priv; + + if (boarddata && gpio_is_valid(boarddata->wp_gpio)) + gpio_free(boarddata->wp_gpio); + + if (boarddata && gpio_is_valid(boarddata->cd_gpio)) { + gpio_free(boarddata->cd_gpio); + + if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) + free_irq(gpio_to_irq(boarddata->cd_gpio), host); + } clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); + kfree(imx_data); } -static struct sdhci_ops sdhci_esdhc_ops = { - .read_w = esdhc_readw_le, - .write_w = esdhc_writew_le, - .write_b = esdhc_writeb_le, - .set_clock = esdhc_set_clock, - .get_max_clock = esdhc_pltfm_get_max_clock, - .get_min_clock = esdhc_pltfm_get_min_clock, -}; - struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA, + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA + | SDHCI_QUIRK_BROKEN_CARD_DETECTION, /* ADMA has issues. 
Might be fixable */ .ops = &sdhci_esdhc_ops, .init = esdhc_pltfm_init, diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index afaf1bc4913a..c3b08f111942 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -19,13 +19,11 @@ */ #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ - SDHCI_QUIRK_BROKEN_CARD_DETECTION | \ SDHCI_QUIRK_NO_BUSY_IRQ | \ SDHCI_QUIRK_NONSTANDARD_CLOCK | \ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ SDHCI_QUIRK_PIO_NEEDS_DELAY | \ - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ - SDHCI_QUIRK_NO_CARD_NO_RESET) + SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) #define ESDHC_SYSTEM_CONTROL 0x2c #define ESDHC_CLOCK_MASK 0x0000fff0 diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index fcd0e1fcba44..ba40d6d035c7 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -73,7 +73,9 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) } struct sdhci_of_data sdhci_esdhc = { - .quirks = ESDHC_DEFAULT_QUIRKS, + /* card detection could be handled via GPIO */ + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION + | SDHCI_QUIRK_NO_CARD_NO_RESET, .ops = { .read_l = sdhci_be32bs_readl, .read_w = esdhc_readw, diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 0dc905b20eee..f8b5f37007b2 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -547,6 +547,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { }, { + .vendor = PCI_VENDOR_ID_RICOH, + .device = 0xe823, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, + }, + + { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB712_SD, .subvendor = PCI_ANY_ID, @@ -900,9 +908,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( { struct sdhci_pci_slot *slot; struct sdhci_host *host; - - resource_size_t addr; - int ret; if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { @@ -949,10 +954,10 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( goto free; } - addr = pci_resource_start(pdev, bar); host->ioaddr = pci_ioremap_bar(pdev, bar); if (!host->ioaddr) { dev_err(&pdev->dev, "failed to remap registers\n"); + ret = -ENOMEM; goto release; } @@ -1012,16 +1017,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; - u8 slots, rev, first_bar; + u8 slots, first_bar; int ret, i; BUG_ON(pdev == NULL); BUG_ON(ent == NULL); - pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); - dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", - (int)pdev->vendor, (int)pdev->device, (int)rev); + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); if (ret) diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index ea2e44d9be5e..2b37016ad0ac 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -17,7 +17,7 @@ struct sdhci_pltfm_host { struct clk *clk; - u32 scratchpad; /* to handle quirks across io-accessor calls */ + void *priv; /* to handle quirks across io-accessor calls */ }; extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 5309ab95aada..69e3ee321eb5 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -499,6 +499,9 @@ static int __devinit 
sdhci_s3c_probe(struct platform_device *pdev) * SDHCI block, or a missing configuration that needs to be set. */ host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; + /* This host supports the Auto CMD12 */ + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + if (pdata->cd_type == S3C_SDHCI_CD_NONE || pdata->cd_type == S3C_SDHCI_CD_PERMANENT) host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index d70c54c7b70a..60a4c97d3d18 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -50,7 +50,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) /* val == 1 -> card removed, val == 0 -> card inserted */ /* if card removed - set irq for low level, else vice versa */ gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; - set_irq_type(irq, gpio_irq_type); + irq_set_irq_type(irq, gpio_irq_type); if (sdhci->data->card_power_gpio >= 0) { if (!sdhci->data->power_always_enb) { diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 4823ee94a63f..f7e1f964395f 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -169,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, if (rc) { dev_err(mmc_dev(host->mmc), "failed to allocate wp gpio\n"); - goto out_cd; + goto out_irq; } tegra_gpio_enable(plat->wp_gpio); gpio_direction_input(plat->wp_gpio); @@ -195,6 +195,9 @@ out_wp: gpio_free(plat->wp_gpio); } +out_irq: + if (gpio_is_valid(plat->cd_gpio)) + free_irq(gpio_to_irq(plat->cd_gpio), host); out_cd: if (gpio_is_valid(plat->cd_gpio)) { tegra_gpio_disable(plat->cd_gpio); @@ -225,6 +228,7 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host) } if (gpio_is_valid(plat->cd_gpio)) { + free_irq(gpio_to_irq(plat->cd_gpio), host); tegra_gpio_disable(plat->cd_gpio); gpio_free(plat->cd_gpio); } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 9e15f41f87be..5d20661bc357 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1334,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param) host = (struct sdhci_host*)param; + /* + * If this tasklet gets rescheduled while running, it will + * be run again afterwards but without any active request. + */ + if (!host->mrq) + return; + spin_lock_irqsave(&host->lock, flags); del_timer(&host->timer); @@ -1345,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param) * upon error conditions. 
*/ if (!(host->flags & SDHCI_DEVICE_DEAD) && - (mrq->cmd->error || + ((mrq->cmd && mrq->cmd->error) || (mrq->data && (mrq->data->error || (mrq->data->stop && mrq->data->stop->error))) || (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 6e0969e40650..25e8bde600d1 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -45,6 +45,7 @@ #define SDHCI_CMD_CRC 0x08 #define SDHCI_CMD_INDEX 0x10 #define SDHCI_CMD_DATA 0x20 +#define SDHCI_CMD_ABORTCMD 0xC0 #define SDHCI_CMD_RESP_NONE 0x00 #define SDHCI_CMD_RESP_LONG 0x01 diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 12884c270171..af97015a2fc7 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -169,7 +169,7 @@ struct sh_mmcif_host { struct dma_chan *chan_rx; struct dma_chan *chan_tx; struct completion dma_complete; - unsigned int dma_sglen; + bool dma_active; }; static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, @@ -194,10 +194,12 @@ static void mmcif_dma_complete(void *arg) return; if (host->data->flags & MMC_DATA_READ) - dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen, + dma_unmap_sg(host->chan_rx->device->dev, + host->data->sg, host->data->sg_len, DMA_FROM_DEVICE); else - dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen, + dma_unmap_sg(host->chan_tx->device->dev, + host->data->sg, host->data->sg_len, DMA_TO_DEVICE); complete(&host->dma_complete); @@ -211,9 +213,10 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) dma_cookie_t cookie = -EINVAL; int ret; - ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE); + ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len, + DMA_FROM_DEVICE); if (ret > 0) { - host->dma_sglen = ret; + host->dma_active = true; desc = chan->device->device_prep_slave_sg(chan, sg, ret, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } @@ -221,14 +224,9 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) if (desc) { desc->callback = mmcif_dma_complete; desc->callback_param = host; - cookie = desc->tx_submit(desc); - if (cookie < 0) { - desc = NULL; - ret = cookie; - } else { - sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); - chan->device->device_issue_pending(chan); - } + cookie = dmaengine_submit(desc); + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); + dma_async_issue_pending(chan); } dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", __func__, host->data->sg_len, ret, cookie); @@ -238,7 +236,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) if (ret >= 0) ret = -EIO; host->chan_rx = NULL; - host->dma_sglen = 0; + host->dma_active = false; dma_release_channel(chan); /* Free the Tx channel too */ chan = host->chan_tx; @@ -263,9 +261,10 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) dma_cookie_t cookie = -EINVAL; int ret; - ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE); + ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len, + DMA_TO_DEVICE); if (ret > 0) { - host->dma_sglen = ret; + host->dma_active = true; desc = chan->device->device_prep_slave_sg(chan, sg, ret, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } @@ -273,14 +272,9 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) if (desc) { desc->callback = mmcif_dma_complete; desc->callback_param = host; - cookie = desc->tx_submit(desc); - if (cookie < 0) { - desc = NULL; - ret = cookie; - } else { - sh_mmcif_bitset(host, 
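The sh_mmcif DMA paths here move from calling descriptor and channel methods directly (desc->tx_submit(), device_issue_pending()) to the dmaengine_submit() and dma_async_issue_pending() wrappers, and they map the scatterlist against the DMA channel's device rather than the platform device. A condensed sketch of a receive submit path under those conventions; slave configuration is assumed to have been done elsewhere and error handling is trimmed.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int start_rx_dma(struct dma_chan *chan, struct scatterlist *sg,
			unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	int mapped;

	/* Map against the DMA device, which may differ from the MMC device. */
	mapped = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
	if (mapped <= 0)
		return -EINVAL;

	desc = chan->device->device_prep_slave_sg(chan, sg, mapped,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
		return -EIO;
	}

	dmaengine_submit(desc);		/* replaces desc->tx_submit(desc) */
	dma_async_issue_pending(chan);	/* replaces device_issue_pending() */
	return 0;
}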
MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); - chan->device->device_issue_pending(chan); - } + cookie = dmaengine_submit(desc); + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); + dma_async_issue_pending(chan); } dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", __func__, host->data->sg_len, ret, cookie); @@ -290,7 +284,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) if (ret >= 0) ret = -EIO; host->chan_tx = NULL; - host->dma_sglen = 0; + host->dma_active = false; dma_release_channel(chan); /* Free the Rx channel too */ chan = host->chan_rx; @@ -317,7 +311,7 @@ static bool sh_mmcif_filter(struct dma_chan *chan, void *arg) static void sh_mmcif_request_dma(struct sh_mmcif_host *host, struct sh_mmcif_plat_data *pdata) { - host->dma_sglen = 0; + host->dma_active = false; /* We can only either use DMA for both Tx and Rx or not use it at all */ if (pdata->dma) { @@ -364,7 +358,7 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host) dma_release_channel(chan); } - host->dma_sglen = 0; + host->dma_active = false; } static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) @@ -753,7 +747,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, } sh_mmcif_get_response(host, cmd); if (host->data) { - if (!host->dma_sglen) { + if (!host->dma_active) { ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); } else { long time = @@ -765,7 +759,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, ret = time; sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); - host->dma_sglen = 0; + host->dma_active = false; } if (ret < 0) mrq->data->bytes_xfered = 0; @@ -850,15 +844,15 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct sh_mmcif_host *host = mmc_priv(mmc); struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; - if (ios->power_mode == MMC_POWER_OFF) { + if (ios->power_mode == MMC_POWER_UP) { + if (p->set_pwr) + p->set_pwr(host->pd, ios->power_mode); + } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* clock stop */ sh_mmcif_clock_control(host, 0); - if (p->down_pwr) + if (ios->power_mode == MMC_POWER_OFF && p->down_pwr) p->down_pwr(host->pd); return; - } else if (ios->power_mode == MMC_POWER_UP) { - if (p->set_pwr) - p->set_pwr(host->pd, ios->power_mode); } if (ios->clock) diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c new file mode 100644 index 000000000000..cc701236d16f --- /dev/null +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -0,0 +1,171 @@ +/* + * SuperH Mobile SDHI + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on "Compaq ASIC3 support": + * + * Copyright 2001 Compaq Computer Corporation. + * Copyright 2004-2005 Phil Blundell + * Copyright 2007-2008 OpenedHand Ltd. 
+ * + * Authors: Phil Blundell <pb@handhelds.org>, + * Samuel Ortiz <sameo@openedhand.com> + * + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sh_mobile_sdhi.h> +#include <linux/mfd/tmio.h> +#include <linux/sh_dma.h> + +#include "tmio_mmc.h" + +struct sh_mobile_sdhi { + struct clk *clk; + struct tmio_mmc_data mmc_data; + struct sh_dmae_slave param_tx; + struct sh_dmae_slave param_rx; + struct tmio_mmc_dma dma_priv; +}; + +static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) +{ + struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + + if (p && p->set_pwr) + p->set_pwr(pdev, state); +} + +static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) +{ + struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + + if (p && p->get_cd) + return p->get_cd(pdev); + else + return -ENOSYS; +} + +static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) +{ + struct sh_mobile_sdhi *priv; + struct tmio_mmc_data *mmc_data; + struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + struct tmio_mmc_host *host; + char clk_name[8]; + int ret; + + priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); + if (priv == NULL) { + dev_err(&pdev->dev, "kzalloc failed\n"); + return -ENOMEM; + } + + mmc_data = &priv->mmc_data; + + snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); + priv->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(priv->clk); + goto eclkget; + } + + clk_enable(priv->clk); + + mmc_data->hclk = clk_get_rate(priv->clk); + mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; + mmc_data->get_cd = sh_mobile_sdhi_get_cd; + mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; + if (p) { + mmc_data->flags = p->tmio_flags; + mmc_data->ocr_mask = p->tmio_ocr_mask; + mmc_data->capabilities |= p->tmio_caps; + + if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { + priv->param_tx.slave_id = p->dma_slave_tx; + priv->param_rx.slave_id = p->dma_slave_rx; + priv->dma_priv.chan_priv_tx = &priv->param_tx; + priv->dma_priv.chan_priv_rx = &priv->param_rx; + priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ + mmc_data->dma = &priv->dma_priv; + } + } + + /* + * All SDHI blocks support 2-byte and larger block sizes in 4-bit + * bus width mode. + */ + mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES; + + /* + * All SDHI blocks support SDIO IRQ signalling. 
+ */ + mmc_data->flags |= TMIO_MMC_SDIO_IRQ; + + ret = tmio_mmc_host_probe(&host, pdev, mmc_data); + if (ret < 0) + goto eprobe; + + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), + (unsigned long)host->ctl, host->irq); + + return ret; + +eprobe: + clk_disable(priv->clk); + clk_put(priv->clk); +eclkget: + kfree(priv); + return ret; +} + +static int sh_mobile_sdhi_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct tmio_mmc_host *host = mmc_priv(mmc); + struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); + + tmio_mmc_host_remove(host); + clk_disable(priv->clk); + clk_put(priv->clk); + kfree(priv); + + return 0; +} + +static struct platform_driver sh_mobile_sdhi_driver = { + .driver = { + .name = "sh_mobile_sdhi", + .owner = THIS_MODULE, + }, + .probe = sh_mobile_sdhi_probe, + .remove = __devexit_p(sh_mobile_sdhi_remove), +}; + +static int __init sh_mobile_sdhi_init(void) +{ + return platform_driver_register(&sh_mobile_sdhi_driver); +} + +static void __exit sh_mobile_sdhi_exit(void) +{ + platform_driver_unregister(&sh_mobile_sdhi_driver); +} + +module_init(sh_mobile_sdhi_init); +module_exit(sh_mobile_sdhi_exit); + +MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sh_mobile_sdhi"); diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index e3c6ef208391..79c568461d59 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -1,8 +1,8 @@ /* - * linux/drivers/mmc/tmio_mmc.c + * linux/drivers/mmc/host/tmio_mmc.c * - * Copyright (C) 2004 Ian Molton - * Copyright (C) 2007 Ian Molton + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -11,1203 +11,22 @@ * Driver for the MMC / SD / SDIO cell found in: * * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 - * - * This driver draws mainly on scattered spec sheets, Reverse engineering - * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit - * support). (Further 4 bit support from a later datasheet). - * - * TODO: - * Investigate using a workqueue for PIO transfers - * Eliminate FIXMEs - * SDIO support - * Better Power management - * Handle MMC errors better - * double buffer support - * */ -#include <linux/delay.h> #include <linux/device.h> -#include <linux/dmaengine.h> -#include <linux/highmem.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/irq.h> #include <linux/mfd/core.h> #include <linux/mfd/tmio.h> #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/scatterlist.h> -#include <linux/workqueue.h> -#include <linux/spinlock.h> - -#define CTL_SD_CMD 0x00 -#define CTL_ARG_REG 0x04 -#define CTL_STOP_INTERNAL_ACTION 0x08 -#define CTL_XFER_BLK_COUNT 0xa -#define CTL_RESPONSE 0x0c -#define CTL_STATUS 0x1c -#define CTL_IRQ_MASK 0x20 -#define CTL_SD_CARD_CLK_CTL 0x24 -#define CTL_SD_XFER_LEN 0x26 -#define CTL_SD_MEM_CARD_OPT 0x28 -#define CTL_SD_ERROR_DETAIL_STATUS 0x2c -#define CTL_SD_DATA_PORT 0x30 -#define CTL_TRANSACTION_CTL 0x34 -#define CTL_SDIO_STATUS 0x36 -#define CTL_SDIO_IRQ_MASK 0x38 -#define CTL_RESET_SD 0xe0 -#define CTL_SDIO_REGS 0x100 -#define CTL_CLK_AND_WAIT_CTL 0x138 -#define CTL_RESET_SDIO 0x1e0 - -/* Definitions for values the CTRL_STATUS register can take. 
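The new sh_mobile_sdhi glue takes everything board-specific through struct sh_mobile_sdhi_info and derives its clock name ("sdhi%d") from the platform device id. A sketch of how board code might register one controller instance; the register base, IRQ number and DMA slave IDs below are placeholders, not values from any real board, and the slave ID symbols vary per SoC.

#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/platform_device.h>

static struct sh_mobile_sdhi_info sdhi0_info = {
	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,	/* SoC-specific slave IDs */
	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
};

static struct resource sdhi0_resources[] = {
	{
		.start	= 0xe6850000,		/* placeholder base address */
		.end	= 0xe68500ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 96,			/* placeholder IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device sdhi0_device = {
	.name		= "sh_mobile_sdhi",	/* matches the driver name */
	.id		= 0,			/* selects the "sdhi0" clock */
	.num_resources	= ARRAY_SIZE(sdhi0_resources),
	.resource	= sdhi0_resources,
	.dev = {
		.platform_data = &sdhi0_info,
	},
};

Exactly two resources are expected, since the probe bails out when pdev->num_resources != 2.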
*/ -#define TMIO_STAT_CMDRESPEND 0x00000001 -#define TMIO_STAT_DATAEND 0x00000004 -#define TMIO_STAT_CARD_REMOVE 0x00000008 -#define TMIO_STAT_CARD_INSERT 0x00000010 -#define TMIO_STAT_SIGSTATE 0x00000020 -#define TMIO_STAT_WRPROTECT 0x00000080 -#define TMIO_STAT_CARD_REMOVE_A 0x00000100 -#define TMIO_STAT_CARD_INSERT_A 0x00000200 -#define TMIO_STAT_SIGSTATE_A 0x00000400 -#define TMIO_STAT_CMD_IDX_ERR 0x00010000 -#define TMIO_STAT_CRCFAIL 0x00020000 -#define TMIO_STAT_STOPBIT_ERR 0x00040000 -#define TMIO_STAT_DATATIMEOUT 0x00080000 -#define TMIO_STAT_RXOVERFLOW 0x00100000 -#define TMIO_STAT_TXUNDERRUN 0x00200000 -#define TMIO_STAT_CMDTIMEOUT 0x00400000 -#define TMIO_STAT_RXRDY 0x01000000 -#define TMIO_STAT_TXRQ 0x02000000 -#define TMIO_STAT_ILL_FUNC 0x20000000 -#define TMIO_STAT_CMD_BUSY 0x40000000 -#define TMIO_STAT_ILL_ACCESS 0x80000000 - -/* Definitions for values the CTRL_SDIO_STATUS register can take. */ -#define TMIO_SDIO_STAT_IOIRQ 0x0001 -#define TMIO_SDIO_STAT_EXPUB52 0x4000 -#define TMIO_SDIO_STAT_EXWT 0x8000 -#define TMIO_SDIO_MASK_ALL 0xc007 - -/* Define some IRQ masks */ -/* This is the mask used at reset by the chip */ -#define TMIO_MASK_ALL 0x837f031d -#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) -#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) -#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ - TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) -#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) - -#define enable_mmc_irqs(host, i) \ - do { \ - u32 mask;\ - mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ - mask &= ~((i) & TMIO_MASK_IRQ); \ - sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ - } while (0) - -#define disable_mmc_irqs(host, i) \ - do { \ - u32 mask;\ - mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ - mask |= ((i) & TMIO_MASK_IRQ); \ - sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ - } while (0) - -#define ack_mmc_irqs(host, i) \ - do { \ - sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ - } while (0) - -/* This is arbitrary, just noone needed any higher alignment yet */ -#define MAX_ALIGN 4 - -struct tmio_mmc_host { - void __iomem *ctl; - unsigned long bus_shift; - struct mmc_command *cmd; - struct mmc_request *mrq; - struct mmc_data *data; - struct mmc_host *mmc; - int irq; - unsigned int sdio_irq_enabled; - - /* Callbacks for clock / power control */ - void (*set_pwr)(struct platform_device *host, int state); - void (*set_clk_div)(struct platform_device *host, int state); - - /* pio related stuff */ - struct scatterlist *sg_ptr; - struct scatterlist *sg_orig; - unsigned int sg_len; - unsigned int sg_off; - - struct platform_device *pdev; - - /* DMA support */ - struct dma_chan *chan_rx; - struct dma_chan *chan_tx; - struct tasklet_struct dma_complete; - struct tasklet_struct dma_issue; -#ifdef CONFIG_TMIO_MMC_DMA - unsigned int dma_sglen; - u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); - struct scatterlist bounce_sg; -#endif - - /* Track lost interrupts */ - struct delayed_work delayed_reset_work; - spinlock_t lock; - unsigned long last_req_ts; -}; - -static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); - -static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) -{ - return readw(host->ctl + (addr << host->bus_shift)); -} - -static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, - u16 *buf, int count) -{ - readsw(host->ctl + (addr << host->bus_shift), buf, count); -} - -static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) -{ 
- return readw(host->ctl + (addr << host->bus_shift)) | - readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; -} - -static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) -{ - writew(val, host->ctl + (addr << host->bus_shift)); -} - -static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, - u16 *buf, int count) -{ - writesw(host->ctl + (addr << host->bus_shift), buf, count); -} - -static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) -{ - writew(val, host->ctl + (addr << host->bus_shift)); - writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); -} - -static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) -{ - host->sg_len = data->sg_len; - host->sg_ptr = data->sg; - host->sg_orig = data->sg; - host->sg_off = 0; -} - -static int tmio_mmc_next_sg(struct tmio_mmc_host *host) -{ - host->sg_ptr = sg_next(host->sg_ptr); - host->sg_off = 0; - return --host->sg_len; -} - -static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) -{ - local_irq_save(*flags); - return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; -} - -static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags) -{ - kunmap_atomic(virt, KM_BIO_SRC_IRQ); - local_irq_restore(*flags); -} - -#ifdef CONFIG_MMC_DEBUG - -#define STATUS_TO_TEXT(a) \ - do { \ - if (status & TMIO_STAT_##a) \ - printk(#a); \ - } while (0) - -void pr_debug_status(u32 status) -{ - printk(KERN_DEBUG "status: %08x = ", status); - STATUS_TO_TEXT(CARD_REMOVE); - STATUS_TO_TEXT(CARD_INSERT); - STATUS_TO_TEXT(SIGSTATE); - STATUS_TO_TEXT(WRPROTECT); - STATUS_TO_TEXT(CARD_REMOVE_A); - STATUS_TO_TEXT(CARD_INSERT_A); - STATUS_TO_TEXT(SIGSTATE_A); - STATUS_TO_TEXT(CMD_IDX_ERR); - STATUS_TO_TEXT(STOPBIT_ERR); - STATUS_TO_TEXT(ILL_FUNC); - STATUS_TO_TEXT(CMD_BUSY); - STATUS_TO_TEXT(CMDRESPEND); - STATUS_TO_TEXT(DATAEND); - STATUS_TO_TEXT(CRCFAIL); - STATUS_TO_TEXT(DATATIMEOUT); - STATUS_TO_TEXT(CMDTIMEOUT); - STATUS_TO_TEXT(RXOVERFLOW); - STATUS_TO_TEXT(TXUNDERRUN); - STATUS_TO_TEXT(RXRDY); - STATUS_TO_TEXT(TXRQ); - STATUS_TO_TEXT(ILL_ACCESS); - printk("\n"); -} - -#else -#define pr_debug_status(s) do { } while (0) -#endif - -static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - - if (enable) { - host->sdio_irq_enabled = 1; - sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, - (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); - } else { - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); - sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); - host->sdio_irq_enabled = 0; - } -} - -static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) -{ - u32 clk = 0, clock; - - if (new_clock) { - for (clock = host->mmc->f_min, clk = 0x80000080; - new_clock >= (clock<<1); clk >>= 1) - clock <<= 1; - clk |= 0x100; - } - - if (host->set_clk_div) - host->set_clk_div(host->pdev, (clk>>22) & 1); - - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); -} - -static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) -{ - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - - /* - * Testing on sh-mobile showed that SDIO IRQs are unmasked when - * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the - * device IRQ here and restore the SDIO IRQ mask before - * re-enabling the device IRQ. 
- */ - if (pdata->flags & TMIO_MMC_SDIO_IRQ) - disable_irq(host->irq); - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); - msleep(10); - if (pdata->flags & TMIO_MMC_SDIO_IRQ) { - tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); - enable_irq(host->irq); - } - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - msleep(10); -} - -static void tmio_mmc_clk_start(struct tmio_mmc_host *host) -{ - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); - msleep(10); - /* see comment in tmio_mmc_clk_stop above */ - if (pdata->flags & TMIO_MMC_SDIO_IRQ) - disable_irq(host->irq); - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); - msleep(10); - if (pdata->flags & TMIO_MMC_SDIO_IRQ) { - tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); - enable_irq(host->irq); - } -} - -static void reset(struct tmio_mmc_host *host) -{ - /* FIXME - should we set stop clock reg here */ - sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); - msleep(10); - sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); - msleep(10); -} - -static void tmio_mmc_reset_work(struct work_struct *work) -{ - struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, - delayed_reset_work.work); - struct mmc_request *mrq; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - mrq = host->mrq; - - /* request already finished */ - if (!mrq - || time_is_after_jiffies(host->last_req_ts + - msecs_to_jiffies(2000))) { - spin_unlock_irqrestore(&host->lock, flags); - return; - } - - dev_warn(&host->pdev->dev, - "timeout waiting for hardware interrupt (CMD%u)\n", - mrq->cmd->opcode); - - if (host->data) - host->data->error = -ETIMEDOUT; - else if (host->cmd) - host->cmd->error = -ETIMEDOUT; - else - mrq->cmd->error = -ETIMEDOUT; - - host->cmd = NULL; - host->data = NULL; - host->mrq = NULL; - - spin_unlock_irqrestore(&host->lock, flags); - - reset(host); - - mmc_request_done(host->mmc, mrq); -} - -static void -tmio_mmc_finish_request(struct tmio_mmc_host *host) -{ - struct mmc_request *mrq = host->mrq; - - if (!mrq) - return; - - host->mrq = NULL; - host->cmd = NULL; - host->data = NULL; - - cancel_delayed_work(&host->delayed_reset_work); - - mmc_request_done(host->mmc, mrq); -} - -/* These are the bitmasks the tmio chip requires to implement the MMC response - * types. Note that R1 and R6 are the same in this scheme. 
*/ -#define APP_CMD 0x0040 -#define RESP_NONE 0x0300 -#define RESP_R1 0x0400 -#define RESP_R1B 0x0500 -#define RESP_R2 0x0600 -#define RESP_R3 0x0700 -#define DATA_PRESENT 0x0800 -#define TRANSFER_READ 0x1000 -#define TRANSFER_MULTI 0x2000 -#define SECURITY_CMD 0x4000 - -static int -tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) -{ - struct mmc_data *data = host->data; - int c = cmd->opcode; - - /* Command 12 is handled by hardware */ - if (cmd->opcode == 12 && !cmd->arg) { - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); - return 0; - } - - switch (mmc_resp_type(cmd)) { - case MMC_RSP_NONE: c |= RESP_NONE; break; - case MMC_RSP_R1: c |= RESP_R1; break; - case MMC_RSP_R1B: c |= RESP_R1B; break; - case MMC_RSP_R2: c |= RESP_R2; break; - case MMC_RSP_R3: c |= RESP_R3; break; - default: - pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); - return -EINVAL; - } - - host->cmd = cmd; - -/* FIXME - this seems to be ok commented out but the spec suggest this bit - * should be set when issuing app commands. - * if(cmd->flags & MMC_FLAG_ACMD) - * c |= APP_CMD; - */ - if (data) { - c |= DATA_PRESENT; - if (data->blocks > 1) { - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); - c |= TRANSFER_MULTI; - } - if (data->flags & MMC_DATA_READ) - c |= TRANSFER_READ; - } - - enable_mmc_irqs(host, TMIO_MASK_CMD); - - /* Fire off the command */ - sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); - sd_ctrl_write16(host, CTL_SD_CMD, c); - - return 0; -} - -/* - * This chip always returns (at least?) as much data as you ask for. - * I'm unsure what happens if you ask for less than a block. This should be - * looked into to ensure that a funny length read doesnt hose the controller. - */ -static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) -{ - struct mmc_data *data = host->data; - void *sg_virt; - unsigned short *buf; - unsigned int count; - unsigned long flags; - - if (!data) { - pr_debug("Spurious PIO IRQ\n"); - return; - } - - sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); - buf = (unsigned short *)(sg_virt + host->sg_off); - - count = host->sg_ptr->length - host->sg_off; - if (count > data->blksz) - count = data->blksz; - - pr_debug("count: %08x offset: %08x flags %08x\n", - count, host->sg_off, data->flags); - - /* Transfer the data */ - if (data->flags & MMC_DATA_READ) - sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); - else - sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); - - host->sg_off += count; - - tmio_mmc_kunmap_atomic(sg_virt, &flags); - - if (host->sg_off == host->sg_ptr->length) - tmio_mmc_next_sg(host); - - return; -} - -/* needs to be called with host->lock held */ -static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) -{ - struct mmc_data *data = host->data; - struct mmc_command *stop; - - host->data = NULL; - - if (!data) { - dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); - return; - } - stop = data->stop; - - /* FIXME - return correct transfer count on errors */ - if (!data->error) - data->bytes_xfered = data->blocks * data->blksz; - else - data->bytes_xfered = 0; - - pr_debug("Completed data request\n"); - - /* - * FIXME: other drivers allow an optional stop command of any given type - * which we dont do, as the chip can auto generate them. - * Perhaps we can be smarter about when to use auto CMD12 and - * only issue the auto request when we know this is the desired - * stop command, allowing fallback to the stop command the - * upper layers expect. For now, we do what works. 
- */ - - if (data->flags & MMC_DATA_READ) { - if (!host->chan_rx) - disable_mmc_irqs(host, TMIO_MASK_READOP); - else - tmio_check_bounce_buffer(host); - dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", - host->mrq); - } else { - if (!host->chan_tx) - disable_mmc_irqs(host, TMIO_MASK_WRITEOP); - dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", - host->mrq); - } - - if (stop) { - if (stop->opcode == 12 && !stop->arg) - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); - else - BUG(); - } - - tmio_mmc_finish_request(host); -} - -static void tmio_mmc_data_irq(struct tmio_mmc_host *host) -{ - struct mmc_data *data; - spin_lock(&host->lock); - data = host->data; - - if (!data) - goto out; - - if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { - /* - * Has all data been written out yet? Testing on SuperH showed, - * that in most cases the first interrupt comes already with the - * BUSY status bit clear, but on some operations, like mount or - * in the beginning of a write / sync / umount, there is one - * DATAEND interrupt with the BUSY bit set, in this cases - * waiting for one more interrupt fixes the problem. - */ - if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { - disable_mmc_irqs(host, TMIO_STAT_DATAEND); - tasklet_schedule(&host->dma_complete); - } - } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { - disable_mmc_irqs(host, TMIO_STAT_DATAEND); - tasklet_schedule(&host->dma_complete); - } else { - tmio_mmc_do_data_irq(host); - } -out: - spin_unlock(&host->lock); -} - -static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, - unsigned int stat) -{ - struct mmc_command *cmd = host->cmd; - int i, addr; - - spin_lock(&host->lock); - - if (!host->cmd) { - pr_debug("Spurious CMD irq\n"); - goto out; - } - - host->cmd = NULL; - - /* This controller is sicker than the PXA one. Not only do we need to - * drop the top 8 bits of the first response word, we also need to - * modify the order of the response for short response command types. - */ - - for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) - cmd->resp[i] = sd_ctrl_read32(host, addr); - - if (cmd->flags & MMC_RSP_136) { - cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); - cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); - cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); - cmd->resp[3] <<= 8; - } else if (cmd->flags & MMC_RSP_R3) { - cmd->resp[0] = cmd->resp[3]; - } - - if (stat & TMIO_STAT_CMDTIMEOUT) - cmd->error = -ETIMEDOUT; - else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) - cmd->error = -EILSEQ; - - /* If there is data to handle we enable data IRQs here, and - * we will ultimatley finish the request in the data_end handler. - * If theres no data or we encountered an error, finish now. 
- */ - if (host->data && !cmd->error) { - if (host->data->flags & MMC_DATA_READ) { - if (!host->chan_rx) - enable_mmc_irqs(host, TMIO_MASK_READOP); - } else { - if (!host->chan_tx) - enable_mmc_irqs(host, TMIO_MASK_WRITEOP); - else - tasklet_schedule(&host->dma_issue); - } - } else { - tmio_mmc_finish_request(host); - } - -out: - spin_unlock(&host->lock); - - return; -} - -static irqreturn_t tmio_mmc_irq(int irq, void *devid) -{ - struct tmio_mmc_host *host = devid; - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - unsigned int ireg, irq_mask, status; - unsigned int sdio_ireg, sdio_irq_mask, sdio_status; - - pr_debug("MMC IRQ begin\n"); - - status = sd_ctrl_read32(host, CTL_STATUS); - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); - ireg = status & TMIO_MASK_IRQ & ~irq_mask; - - sdio_ireg = 0; - if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { - sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); - sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); - sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; - - sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); - - if (sdio_ireg && !host->sdio_irq_enabled) { - pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", - sdio_status, sdio_irq_mask, sdio_ireg); - tmio_mmc_enable_sdio_irq(host->mmc, 0); - goto out; - } - if (host->mmc->caps & MMC_CAP_SDIO_IRQ && - sdio_ireg & TMIO_SDIO_STAT_IOIRQ) - mmc_signal_sdio_irq(host->mmc); - - if (sdio_ireg) - goto out; - } - - pr_debug_status(status); - pr_debug_status(ireg); - - if (!ireg) { - disable_mmc_irqs(host, status & ~irq_mask); - - pr_warning("tmio_mmc: Spurious irq, disabling! " - "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); - pr_debug_status(status); - - goto out; - } - - while (ireg) { - /* Card insert / remove attempts */ - if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { - ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | - TMIO_STAT_CARD_REMOVE); - mmc_detect_change(host->mmc, msecs_to_jiffies(100)); - } - - /* CRC and other errors */ -/* if (ireg & TMIO_STAT_ERR_IRQ) - * handled |= tmio_error_irq(host, irq, stat); - */ - - /* Command completion */ - if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { - ack_mmc_irqs(host, - TMIO_STAT_CMDRESPEND | - TMIO_STAT_CMDTIMEOUT); - tmio_mmc_cmd_irq(host, status); - } - - /* Data transfer */ - if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { - ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); - tmio_mmc_pio_irq(host); - } - - /* Data transfer completion */ - if (ireg & TMIO_STAT_DATAEND) { - ack_mmc_irqs(host, TMIO_STAT_DATAEND); - tmio_mmc_data_irq(host); - } - - /* Check status - keep going until we've handled it all */ - status = sd_ctrl_read32(host, CTL_STATUS); - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); - ireg = status & TMIO_MASK_IRQ & ~irq_mask; - - pr_debug("Status at end of loop: %08x\n", status); - pr_debug_status(status); - } - pr_debug("MMC IRQ end\n"); - -out: - return IRQ_HANDLED; -} - -#ifdef CONFIG_TMIO_MMC_DMA -static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) -{ - if (host->sg_ptr == &host->bounce_sg) { - unsigned long flags; - void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); - memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); - tmio_mmc_kunmap_atomic(sg_vaddr, &flags); - } -} - -static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) -{ -#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) - /* Switch DMA mode on or off - 
SuperH specific? */ - sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); -#endif -} - -static void tmio_dma_complete(void *arg) -{ - struct tmio_mmc_host *host = arg; - - dev_dbg(&host->pdev->dev, "Command completed\n"); - - if (!host->data) - dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); - else - enable_mmc_irqs(host, TMIO_STAT_DATAEND); -} - -static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) -{ - struct scatterlist *sg = host->sg_ptr, *sg_tmp; - struct dma_async_tx_descriptor *desc = NULL; - struct dma_chan *chan = host->chan_rx; - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - dma_cookie_t cookie; - int ret, i; - bool aligned = true, multiple = true; - unsigned int align = (1 << pdata->dma->alignment_shift) - 1; - - for_each_sg(sg, sg_tmp, host->sg_len, i) { - if (sg_tmp->offset & align) - aligned = false; - if (sg_tmp->length & align) { - multiple = false; - break; - } - } - - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || - align >= MAX_ALIGN)) || !multiple) { - ret = -EINVAL; - goto pio; - } - - /* The only sg element can be unaligned, use our bounce buffer then */ - if (!aligned) { - sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); - host->sg_ptr = &host->bounce_sg; - sg = host->sg_ptr; - } - - ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE); - if (ret > 0) { - host->dma_sglen = ret; - desc = chan->device->device_prep_slave_sg(chan, sg, ret, - DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - } - - if (desc) { - desc->callback = tmio_dma_complete; - desc->callback_param = host; - cookie = desc->tx_submit(desc); - if (cookie < 0) { - desc = NULL; - ret = cookie; - } else { - chan->device->device_issue_pending(chan); - } - } - dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", - __func__, host->sg_len, ret, cookie, host->mrq); - -pio: - if (!desc) { - /* DMA failed, fall back to PIO */ - if (ret >= 0) - ret = -EIO; - host->chan_rx = NULL; - dma_release_channel(chan); - /* Free the Tx channel too */ - chan = host->chan_tx; - if (chan) { - host->chan_tx = NULL; - dma_release_channel(chan); - } - dev_warn(&host->pdev->dev, - "DMA failed: %d, falling back to PIO\n", ret); - tmio_mmc_enable_dma(host, false); - } - - dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, - desc, cookie, host->sg_len); -} - -static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) -{ - struct scatterlist *sg = host->sg_ptr, *sg_tmp; - struct dma_async_tx_descriptor *desc = NULL; - struct dma_chan *chan = host->chan_tx; - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - dma_cookie_t cookie; - int ret, i; - bool aligned = true, multiple = true; - unsigned int align = (1 << pdata->dma->alignment_shift) - 1; - - for_each_sg(sg, sg_tmp, host->sg_len, i) { - if (sg_tmp->offset & align) - aligned = false; - if (sg_tmp->length & align) { - multiple = false; - break; - } - } - - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || - align >= MAX_ALIGN)) || !multiple) { - ret = -EINVAL; - goto pio; - } - - /* The only sg element can be unaligned, use our bounce buffer then */ - if (!aligned) { - unsigned long flags; - void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); - sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); - memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); - tmio_mmc_kunmap_atomic(sg_vaddr, &flags); - host->sg_ptr = 
&host->bounce_sg; - sg = host->sg_ptr; - } - - ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE); - if (ret > 0) { - host->dma_sglen = ret; - desc = chan->device->device_prep_slave_sg(chan, sg, ret, - DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - } - - if (desc) { - desc->callback = tmio_dma_complete; - desc->callback_param = host; - cookie = desc->tx_submit(desc); - if (cookie < 0) { - desc = NULL; - ret = cookie; - } - } - dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", - __func__, host->sg_len, ret, cookie, host->mrq); - -pio: - if (!desc) { - /* DMA failed, fall back to PIO */ - if (ret >= 0) - ret = -EIO; - host->chan_tx = NULL; - dma_release_channel(chan); - /* Free the Rx channel too */ - chan = host->chan_rx; - if (chan) { - host->chan_rx = NULL; - dma_release_channel(chan); - } - dev_warn(&host->pdev->dev, - "DMA failed: %d, falling back to PIO\n", ret); - tmio_mmc_enable_dma(host, false); - } - - dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, - desc, cookie); -} - -static void tmio_mmc_start_dma(struct tmio_mmc_host *host, - struct mmc_data *data) -{ - if (data->flags & MMC_DATA_READ) { - if (host->chan_rx) - tmio_mmc_start_dma_rx(host); - } else { - if (host->chan_tx) - tmio_mmc_start_dma_tx(host); - } -} - -static void tmio_issue_tasklet_fn(unsigned long priv) -{ - struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; - struct dma_chan *chan = host->chan_tx; - - chan->device->device_issue_pending(chan); -} - -static void tmio_tasklet_fn(unsigned long arg) -{ - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - - if (!host->data) - goto out; - - if (host->data->flags & MMC_DATA_READ) - dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, - DMA_FROM_DEVICE); - else - dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, - DMA_TO_DEVICE); - - tmio_mmc_do_data_irq(host); -out: - spin_unlock_irqrestore(&host->lock, flags); -} - -/* It might be necessary to make filter MFD specific */ -static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) -{ - dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); - chan->private = arg; - return true; -} - -static void tmio_mmc_request_dma(struct tmio_mmc_host *host, - struct tmio_mmc_data *pdata) -{ - /* We can only either use DMA for both Tx and Rx or not use it at all */ - if (pdata->dma) { - dma_cap_mask_t mask; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, - pdata->dma->chan_priv_tx); - dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, - host->chan_tx); - - if (!host->chan_tx) - return; - - host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, - pdata->dma->chan_priv_rx); - dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, - host->chan_rx); - - if (!host->chan_rx) { - dma_release_channel(host->chan_tx); - host->chan_tx = NULL; - return; - } - - tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); - tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); - - tmio_mmc_enable_dma(host, true); - } -} - -static void tmio_mmc_release_dma(struct tmio_mmc_host *host) -{ - if (host->chan_tx) { - struct dma_chan *chan = host->chan_tx; - host->chan_tx = NULL; - dma_release_channel(chan); - } - if (host->chan_rx) { - struct dma_chan *chan = host->chan_rx; - host->chan_rx = NULL; - dma_release_channel(chan); - } -} -#else -static void 
tmio_check_bounce_buffer(struct tmio_mmc_host *host) -{ -} - -static void tmio_mmc_start_dma(struct tmio_mmc_host *host, - struct mmc_data *data) -{ -} - -static void tmio_mmc_request_dma(struct tmio_mmc_host *host, - struct tmio_mmc_data *pdata) -{ - host->chan_tx = NULL; - host->chan_rx = NULL; -} - -static void tmio_mmc_release_dma(struct tmio_mmc_host *host) -{ -} -#endif - -static int tmio_mmc_start_data(struct tmio_mmc_host *host, - struct mmc_data *data) -{ - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - - pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", - data->blksz, data->blocks); - - /* Some hardware cannot perform 2 byte requests in 4 bit mode */ - if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { - int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; - - if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { - pr_err("%s: %d byte block unsupported in 4 bit mode\n", - mmc_hostname(host->mmc), data->blksz); - return -EINVAL; - } - } - - tmio_mmc_init_sg(host, data); - host->data = data; - - /* Set transfer length / blocksize */ - sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); - - tmio_mmc_start_dma(host, data); - - return 0; -} - -/* Process requests from the MMC layer */ -static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - int ret; - - if (host->mrq) - pr_debug("request not null\n"); - - host->last_req_ts = jiffies; - wmb(); - host->mrq = mrq; - - if (mrq->data) { - ret = tmio_mmc_start_data(host, mrq->data); - if (ret) - goto fail; - } - - ret = tmio_mmc_start_command(host, mrq->cmd); - if (!ret) { - schedule_delayed_work(&host->delayed_reset_work, - msecs_to_jiffies(2000)); - return; - } - -fail: - host->mrq = NULL; - mrq->cmd->error = ret; - mmc_request_done(mmc, mrq); -} - -/* Set MMC clock / power. - * Note: This controller uses a simple divider scheme therefore it cannot - * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as - * MMC wont run that fast, it has to be clocked at 12MHz which is the next - * slowest setting. - */ -static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - - if (ios->clock) - tmio_mmc_set_clock(host, ios->clock); - - /* Power sequence - OFF -> ON -> UP */ - switch (ios->power_mode) { - case MMC_POWER_OFF: /* power down SD bus */ - if (host->set_pwr) - host->set_pwr(host->pdev, 0); - tmio_mmc_clk_stop(host); - break; - case MMC_POWER_ON: /* power up SD bus */ - if (host->set_pwr) - host->set_pwr(host->pdev, 1); - break; - case MMC_POWER_UP: /* start bus clock */ - tmio_mmc_clk_start(host); - break; - } - - switch (ios->bus_width) { - case MMC_BUS_WIDTH_1: - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); - break; - case MMC_BUS_WIDTH_4: - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); - break; - } - - /* Let things settle. delay taken from winCE driver */ - udelay(140); -} - -static int tmio_mmc_get_ro(struct mmc_host *mmc) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - - return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || - (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 
0 : 1; -} - -static int tmio_mmc_get_cd(struct mmc_host *mmc) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - struct mfd_cell *cell = host->pdev->dev.platform_data; - struct tmio_mmc_data *pdata = cell->driver_data; - - if (!pdata->get_cd) - return -ENOSYS; - else - return pdata->get_cd(host->pdev); -} - -static const struct mmc_host_ops tmio_mmc_ops = { - .request = tmio_mmc_request, - .set_ios = tmio_mmc_set_ios, - .get_ro = tmio_mmc_get_ro, - .get_cd = tmio_mmc_get_cd, - .enable_sdio_irq = tmio_mmc_enable_sdio_irq, -}; +#include "tmio_mmc.h" #ifdef CONFIG_PM static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + const struct mfd_cell *cell = mfd_get_cell(dev); struct mmc_host *mmc = platform_get_drvdata(dev); int ret; @@ -1222,7 +41,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) static int tmio_mmc_resume(struct platform_device *dev) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + const struct mfd_cell *cell = mfd_get_cell(dev); struct mmc_host *mmc = platform_get_drvdata(dev); int ret = 0; @@ -1243,138 +62,54 @@ out: #define tmio_mmc_resume NULL #endif -static int __devinit tmio_mmc_probe(struct platform_device *dev) +static int __devinit tmio_mmc_probe(struct platform_device *pdev) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; + const struct mfd_cell *cell = mfd_get_cell(pdev); struct tmio_mmc_data *pdata; - struct resource *res_ctl; struct tmio_mmc_host *host; - struct mmc_host *mmc; int ret = -EINVAL; - u32 irq_mask = TMIO_MASK_CMD; - if (dev->num_resources != 2) + if (pdev->num_resources != 2) goto out; - res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res_ctl) - goto out; - - pdata = cell->driver_data; + pdata = mfd_get_data(pdev); if (!pdata || !pdata->hclk) goto out; - ret = -ENOMEM; - - mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); - if (!mmc) - goto out; - - host = mmc_priv(mmc); - host->mmc = mmc; - host->pdev = dev; - platform_set_drvdata(dev, mmc); - - host->set_pwr = pdata->set_pwr; - host->set_clk_div = pdata->set_clk_div; - - /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ - host->bus_shift = resource_size(res_ctl) >> 10; - - host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); - if (!host->ctl) - goto host_free; - - mmc->ops = &tmio_mmc_ops; - mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; - mmc->f_max = pdata->hclk; - mmc->f_min = mmc->f_max / 512; - mmc->max_segs = 32; - mmc->max_blk_size = 512; - mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * - mmc->max_segs; - mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; - mmc->max_seg_size = mmc->max_req_size; - if (pdata->ocr_mask) - mmc->ocr_avail = pdata->ocr_mask; - else - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - /* Tell the MFD core we are ready to be enabled */ if (cell->enable) { - ret = cell->enable(dev); + ret = cell->enable(pdev); if (ret) - goto unmap_ctl; + goto out; } - tmio_mmc_clk_stop(host); - reset(host); - - ret = platform_get_irq(dev, 0); - if (ret >= 0) - host->irq = ret; - else - goto cell_disable; - - disable_mmc_irqs(host, TMIO_MASK_ALL); - if (pdata->flags & TMIO_MMC_SDIO_IRQ) - tmio_mmc_enable_sdio_irq(mmc, 0); - - ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | - IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); + ret = tmio_mmc_host_probe(&host, pdev, pdata); if (ret) goto cell_disable; - 
spin_lock_init(&host->lock); - - /* Init delayed work for request timeouts */ - INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); - - /* See if we also get DMA */ - tmio_mmc_request_dma(host, pdata); - - mmc_add_host(mmc); - pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), (unsigned long)host->ctl, host->irq); - /* Unmask the IRQs we want to know about */ - if (!host->chan_rx) - irq_mask |= TMIO_MASK_READOP; - if (!host->chan_tx) - irq_mask |= TMIO_MASK_WRITEOP; - enable_mmc_irqs(host, irq_mask); - return 0; cell_disable: if (cell->disable) - cell->disable(dev); -unmap_ctl: - iounmap(host->ctl); -host_free: - mmc_free_host(mmc); + cell->disable(pdev); out: return ret; } -static int __devexit tmio_mmc_remove(struct platform_device *dev) +static int __devexit tmio_mmc_remove(struct platform_device *pdev) { - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; - struct mmc_host *mmc = platform_get_drvdata(dev); + const struct mfd_cell *cell = mfd_get_cell(pdev); + struct mmc_host *mmc = platform_get_drvdata(pdev); - platform_set_drvdata(dev, NULL); + platform_set_drvdata(pdev, NULL); if (mmc) { - struct tmio_mmc_host *host = mmc_priv(mmc); - mmc_remove_host(mmc); - cancel_delayed_work_sync(&host->delayed_reset_work); - tmio_mmc_release_dma(host); - free_irq(host->irq, host); + tmio_mmc_host_remove(mmc_priv(mmc)); if (cell->disable) - cell->disable(dev); - iounmap(host->ctl); - mmc_free_host(mmc); + cell->disable(pdev); } return 0; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h new file mode 100644 index 000000000000..099ed49a259b --- /dev/null +++ b/drivers/mmc/host/tmio_mmc.h @@ -0,0 +1,123 @@ +/* + * linux/drivers/mmc/host/tmio_mmc.h + * + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for the MMC / SD / SDIO cell found in: + * + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 + */ + +#ifndef TMIO_MMC_H +#define TMIO_MMC_H + +#include <linux/highmem.h> +#include <linux/mmc/tmio.h> +#include <linux/pagemap.h> + +/* Definitions for values the CTRL_SDIO_STATUS register can take. 
*/ +#define TMIO_SDIO_STAT_IOIRQ 0x0001 +#define TMIO_SDIO_STAT_EXPUB52 0x4000 +#define TMIO_SDIO_STAT_EXWT 0x8000 +#define TMIO_SDIO_MASK_ALL 0xc007 + +/* Define some IRQ masks */ +/* This is the mask used at reset by the chip */ +#define TMIO_MASK_ALL 0x837f031d +#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) +#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) +#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ + TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) +#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) + +struct tmio_mmc_data; + +struct tmio_mmc_host { + void __iomem *ctl; + unsigned long bus_shift; + struct mmc_command *cmd; + struct mmc_request *mrq; + struct mmc_data *data; + struct mmc_host *mmc; + int irq; + unsigned int sdio_irq_enabled; + + /* Callbacks for clock / power control */ + void (*set_pwr)(struct platform_device *host, int state); + void (*set_clk_div)(struct platform_device *host, int state); + + /* pio related stuff */ + struct scatterlist *sg_ptr; + struct scatterlist *sg_orig; + unsigned int sg_len; + unsigned int sg_off; + + struct platform_device *pdev; + struct tmio_mmc_data *pdata; + + /* DMA support */ + bool force_pio; + struct dma_chan *chan_rx; + struct dma_chan *chan_tx; + struct tasklet_struct dma_complete; + struct tasklet_struct dma_issue; + struct scatterlist bounce_sg; + u8 *bounce_buf; + + /* Track lost interrupts */ + struct delayed_work delayed_reset_work; + spinlock_t lock; + unsigned long last_req_ts; +}; + +int tmio_mmc_host_probe(struct tmio_mmc_host **host, + struct platform_device *pdev, + struct tmio_mmc_data *pdata); +void tmio_mmc_host_remove(struct tmio_mmc_host *host); +void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); + +void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); +void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); + +static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, + unsigned long *flags) +{ + local_irq_save(*flags); + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; +} + +static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, + unsigned long *flags, void *virt) +{ + kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); + local_irq_restore(*flags); +} + +#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) +void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); +void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); +void tmio_mmc_release_dma(struct tmio_mmc_host *host); +#else +static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ +} + +static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + host->chan_tx = NULL; + host->chan_rx = NULL; +} + +static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) +{ +} +#endif + +#endif diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c new file mode 100644 index 000000000000..d3de74ab633e --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -0,0 +1,317 @@ +/* + * linux/drivers/mmc/tmio_mmc_dma.c + * + * Copyright (C) 2010-2011 Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * DMA function for TMIO MMC implementations + */ + +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/mmc/tmio.h> +#include <linux/pagemap.h> +#include <linux/scatterlist.h> + +#include "tmio_mmc.h" + +#define TMIO_MMC_MIN_DMA_LEN 8 + +static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) +{ +#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) + /* Switch DMA mode on or off - SuperH specific? */ + writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); +#endif +} + +static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) +{ + struct scatterlist *sg = host->sg_ptr, *sg_tmp; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_rx; + struct tmio_mmc_data *pdata = host->pdata; + dma_cookie_t cookie; + int ret, i; + bool aligned = true, multiple = true; + unsigned int align = (1 << pdata->dma->alignment_shift) - 1; + + for_each_sg(sg, sg_tmp, host->sg_len, i) { + if (sg_tmp->offset & align) + aligned = false; + if (sg_tmp->length & align) { + multiple = false; + break; + } + } + + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || + (align & PAGE_MASK))) || !multiple) { + ret = -EINVAL; + goto pio; + } + + if (sg->length < TMIO_MMC_MIN_DMA_LEN) { + host->force_pio = true; + return; + } + + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); + + /* The only sg element can be unaligned, use our bounce buffer then */ + if (!aligned) { + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); + host->sg_ptr = &host->bounce_sg; + sg = host->sg_ptr; + } + + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); + if (ret > 0) + desc = chan->device->device_prep_slave_sg(chan, sg, ret, + DMA_FROM_DEVICE, DMA_CTRL_ACK); + + if (desc) { + cookie = dmaengine_submit(desc); + if (cookie < 0) { + desc = NULL; + ret = cookie; + } + } + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", + __func__, host->sg_len, ret, cookie, host->mrq); + +pio: + if (!desc) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = -EIO; + host->chan_rx = NULL; + dma_release_channel(chan); + /* Free the Tx channel too */ + chan = host->chan_tx; + if (chan) { + host->chan_tx = NULL; + dma_release_channel(chan); + } + dev_warn(&host->pdev->dev, + "DMA failed: %d, falling back to PIO\n", ret); + tmio_mmc_enable_dma(host, false); + } + + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, + desc, cookie, host->sg_len); +} + +static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) +{ + struct scatterlist *sg = host->sg_ptr, *sg_tmp; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_tx; + struct tmio_mmc_data *pdata = host->pdata; + dma_cookie_t cookie; + int ret, i; + bool aligned = true, multiple = true; + unsigned int align = (1 << pdata->dma->alignment_shift) - 1; + + for_each_sg(sg, sg_tmp, host->sg_len, i) { + if (sg_tmp->offset & align) + aligned = false; + if (sg_tmp->length & align) { + multiple = false; + break; + } + } + + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || + (align & PAGE_MASK))) || !multiple) { + ret = -EINVAL; + goto pio; + } + + if (sg->length < TMIO_MMC_MIN_DMA_LEN) { + host->force_pio = true; + return; + } + + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); + + /* The only sg element can be unaligned, use our bounce buffer then */ + if (!aligned) { + unsigned long flags; + void *sg_vaddr = 
tmio_mmc_kmap_atomic(sg, &flags); + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); + memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); + tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); + host->sg_ptr = &host->bounce_sg; + sg = host->sg_ptr; + } + + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); + if (ret > 0) + desc = chan->device->device_prep_slave_sg(chan, sg, ret, + DMA_TO_DEVICE, DMA_CTRL_ACK); + + if (desc) { + cookie = dmaengine_submit(desc); + if (cookie < 0) { + desc = NULL; + ret = cookie; + } + } + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", + __func__, host->sg_len, ret, cookie, host->mrq); + +pio: + if (!desc) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = -EIO; + host->chan_tx = NULL; + dma_release_channel(chan); + /* Free the Rx channel too */ + chan = host->chan_rx; + if (chan) { + host->chan_rx = NULL; + dma_release_channel(chan); + } + dev_warn(&host->pdev->dev, + "DMA failed: %d, falling back to PIO\n", ret); + tmio_mmc_enable_dma(host, false); + } + + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, + desc, cookie); +} + +void tmio_mmc_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + if (data->flags & MMC_DATA_READ) { + if (host->chan_rx) + tmio_mmc_start_dma_rx(host); + } else { + if (host->chan_tx) + tmio_mmc_start_dma_tx(host); + } +} + +static void tmio_mmc_issue_tasklet_fn(unsigned long priv) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; + struct dma_chan *chan = NULL; + + spin_lock_irq(&host->lock); + + if (host && host->data) { + if (host->data->flags & MMC_DATA_READ) + chan = host->chan_rx; + else + chan = host->chan_tx; + } + + spin_unlock_irq(&host->lock); + + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + + if (chan) + dma_async_issue_pending(chan); +} + +static void tmio_mmc_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + spin_lock_irq(&host->lock); + + if (!host->data) + goto out; + + if (host->data->flags & MMC_DATA_READ) + dma_unmap_sg(host->chan_rx->device->dev, + host->sg_ptr, host->sg_len, + DMA_FROM_DEVICE); + else + dma_unmap_sg(host->chan_tx->device->dev, + host->sg_ptr, host->sg_len, + DMA_TO_DEVICE); + + tmio_mmc_do_data_irq(host); +out: + spin_unlock_irq(&host->lock); +} + +/* It might be necessary to make filter MFD specific */ +static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) +{ + dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); + chan->private = arg; + return true; +} + +void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) +{ + /* We can only either use DMA for both Tx and Rx or not use it at all */ + if (pdata->dma) { + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, + pdata->dma->chan_priv_tx); + dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, + host->chan_tx); + + if (!host->chan_tx) + return; + + host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, + pdata->dma->chan_priv_rx); + dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, + host->chan_rx); + + if (!host->chan_rx) + goto ereqrx; + + host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); + if (!host->bounce_buf) + goto ebouncebuf; + + tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); + tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); + + 
tmio_mmc_enable_dma(host, true); + + return; +ebouncebuf: + dma_release_channel(host->chan_rx); + host->chan_rx = NULL; +ereqrx: + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; + return; + } +} + +void tmio_mmc_release_dma(struct tmio_mmc_host *host) +{ + if (host->chan_tx) { + struct dma_chan *chan = host->chan_tx; + host->chan_tx = NULL; + dma_release_channel(chan); + } + if (host->chan_rx) { + struct dma_chan *chan = host->chan_rx; + host->chan_rx = NULL; + dma_release_channel(chan); + } + if (host->bounce_buf) { + free_pages((unsigned long)host->bounce_buf, 0); + host->bounce_buf = NULL; + } +} diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c new file mode 100644 index 000000000000..710339a85c84 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -0,0 +1,897 @@ +/* + * linux/drivers/mmc/host/tmio_mmc_pio.c + * + * Copyright (C) 2011 Guennadi Liakhovetski + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for the MMC / SD / SDIO IP found in: + * + * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs + * + * This driver draws mainly on scattered spec sheets, Reverse engineering + * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit + * support). (Further 4 bit support from a later datasheet). + * + * TODO: + * Investigate using a workqueue for PIO transfers + * Eliminate FIXMEs + * SDIO support + * Better Power management + * Handle MMC errors better + * double buffer support + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/mmc/tmio.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> + +#include "tmio_mmc.h" + +static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) +{ + return readw(host->ctl + (addr << host->bus_shift)); +} + +static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, + u16 *buf, int count) +{ + readsw(host->ctl + (addr << host->bus_shift), buf, count); +} + +static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) +{ + return readw(host->ctl + (addr << host->bus_shift)) | + readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; +} + +static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) +{ + writew(val, host->ctl + (addr << host->bus_shift)); +} + +static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, + u16 *buf, int count) +{ + writesw(host->ctl + (addr << host->bus_shift), buf, count); +} + +static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) +{ + writew(val, host->ctl + (addr << host->bus_shift)); + writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); +} + +void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); +} + +void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); +} + +static void 
tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ + sd_ctrl_write32(host, CTL_STATUS, ~i); +} + +static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) +{ + host->sg_len = data->sg_len; + host->sg_ptr = data->sg; + host->sg_orig = data->sg; + host->sg_off = 0; +} + +static int tmio_mmc_next_sg(struct tmio_mmc_host *host) +{ + host->sg_ptr = sg_next(host->sg_ptr); + host->sg_off = 0; + return --host->sg_len; +} + +#ifdef CONFIG_MMC_DEBUG + +#define STATUS_TO_TEXT(a, status, i) \ + do { \ + if (status & TMIO_STAT_##a) { \ + if (i++) \ + printk(" | "); \ + printk(#a); \ + } \ + } while (0) + +static void pr_debug_status(u32 status) +{ + int i = 0; + printk(KERN_DEBUG "status: %08x = ", status); + STATUS_TO_TEXT(CARD_REMOVE, status, i); + STATUS_TO_TEXT(CARD_INSERT, status, i); + STATUS_TO_TEXT(SIGSTATE, status, i); + STATUS_TO_TEXT(WRPROTECT, status, i); + STATUS_TO_TEXT(CARD_REMOVE_A, status, i); + STATUS_TO_TEXT(CARD_INSERT_A, status, i); + STATUS_TO_TEXT(SIGSTATE_A, status, i); + STATUS_TO_TEXT(CMD_IDX_ERR, status, i); + STATUS_TO_TEXT(STOPBIT_ERR, status, i); + STATUS_TO_TEXT(ILL_FUNC, status, i); + STATUS_TO_TEXT(CMD_BUSY, status, i); + STATUS_TO_TEXT(CMDRESPEND, status, i); + STATUS_TO_TEXT(DATAEND, status, i); + STATUS_TO_TEXT(CRCFAIL, status, i); + STATUS_TO_TEXT(DATATIMEOUT, status, i); + STATUS_TO_TEXT(CMDTIMEOUT, status, i); + STATUS_TO_TEXT(RXOVERFLOW, status, i); + STATUS_TO_TEXT(TXUNDERRUN, status, i); + STATUS_TO_TEXT(RXRDY, status, i); + STATUS_TO_TEXT(TXRQ, status, i); + STATUS_TO_TEXT(ILL_ACCESS, status, i); + printk("\n"); +} + +#else +#define pr_debug_status(s) do { } while (0) +#endif + +static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + if (enable) { + host->sdio_irq_enabled = 1; + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, + (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); + } else { + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); + host->sdio_irq_enabled = 0; + } +} + +static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) +{ + u32 clk = 0, clock; + + if (new_clock) { + for (clock = host->mmc->f_min, clk = 0x80000080; + new_clock >= (clock<<1); clk >>= 1) + clock <<= 1; + clk |= 0x100; + } + + if (host->set_clk_div) + host->set_clk_div(host->pdev, (clk>>22) & 1); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); +} + +static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) +{ + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); + + /* implicit BUG_ON(!res) */ + if (resource_size(res) > 0x100) { + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); + msleep(10); + } + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + msleep(10); +} + +static void tmio_mmc_clk_start(struct tmio_mmc_host *host) +{ + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); + + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); + msleep(10); + + /* implicit BUG_ON(!res) */ + if (resource_size(res) > 0x100) { + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); + msleep(10); + } +} + +static void tmio_mmc_reset(struct tmio_mmc_host *host) +{ + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); + + /* FIXME - should we set stop clock reg here */ + 
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); + /* implicit BUG_ON(!res) */ + if (resource_size(res) > 0x100) + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); + msleep(10); + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); + if (resource_size(res) > 0x100) + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); + msleep(10); +} + +static void tmio_mmc_reset_work(struct work_struct *work) +{ + struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, + delayed_reset_work.work); + struct mmc_request *mrq; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + mrq = host->mrq; + + /* request already finished */ + if (!mrq + || time_is_after_jiffies(host->last_req_ts + + msecs_to_jiffies(2000))) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + dev_warn(&host->pdev->dev, + "timeout waiting for hardware interrupt (CMD%u)\n", + mrq->cmd->opcode); + + if (host->data) + host->data->error = -ETIMEDOUT; + else if (host->cmd) + host->cmd->error = -ETIMEDOUT; + else + mrq->cmd->error = -ETIMEDOUT; + + host->cmd = NULL; + host->data = NULL; + host->mrq = NULL; + host->force_pio = false; + + spin_unlock_irqrestore(&host->lock, flags); + + tmio_mmc_reset(host); + + mmc_request_done(host->mmc, mrq); +} + +static void tmio_mmc_finish_request(struct tmio_mmc_host *host) +{ + struct mmc_request *mrq = host->mrq; + + if (!mrq) + return; + + host->mrq = NULL; + host->cmd = NULL; + host->data = NULL; + host->force_pio = false; + + cancel_delayed_work(&host->delayed_reset_work); + + mmc_request_done(host->mmc, mrq); +} + +/* These are the bitmasks the tmio chip requires to implement the MMC response + * types. Note that R1 and R6 are the same in this scheme. */ +#define APP_CMD 0x0040 +#define RESP_NONE 0x0300 +#define RESP_R1 0x0400 +#define RESP_R1B 0x0500 +#define RESP_R2 0x0600 +#define RESP_R3 0x0700 +#define DATA_PRESENT 0x0800 +#define TRANSFER_READ 0x1000 +#define TRANSFER_MULTI 0x2000 +#define SECURITY_CMD 0x4000 + +static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = host->data; + int c = cmd->opcode; + + /* Command 12 is handled by hardware */ + if (cmd->opcode == 12 && !cmd->arg) { + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); + return 0; + } + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: c |= RESP_NONE; break; + case MMC_RSP_R1: c |= RESP_R1; break; + case MMC_RSP_R1B: c |= RESP_R1B; break; + case MMC_RSP_R2: c |= RESP_R2; break; + case MMC_RSP_R3: c |= RESP_R3; break; + default: + pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); + return -EINVAL; + } + + host->cmd = cmd; + +/* FIXME - this seems to be ok commented out but the spec suggest this bit + * should be set when issuing app commands. + * if(cmd->flags & MMC_FLAG_ACMD) + * c |= APP_CMD; + */ + if (data) { + c |= DATA_PRESENT; + if (data->blocks > 1) { + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); + c |= TRANSFER_MULTI; + } + if (data->flags & MMC_DATA_READ) + c |= TRANSFER_READ; + } + + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); + + /* Fire off the command */ + sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); + sd_ctrl_write16(host, CTL_SD_CMD, c); + + return 0; +} + +/* + * This chip always returns (at least?) as much data as you ask for. + * I'm unsure what happens if you ask for less than a block. This should be + * looked into to ensure that a funny length read doesn't hose the controller. 
+ */ +static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) +{ + struct mmc_data *data = host->data; + void *sg_virt; + unsigned short *buf; + unsigned int count; + unsigned long flags; + + if ((host->chan_tx || host->chan_rx) && !host->force_pio) { + pr_err("PIO IRQ in DMA mode!\n"); + return; + } else if (!data) { + pr_debug("Spurious PIO IRQ\n"); + return; + } + + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); + buf = (unsigned short *)(sg_virt + host->sg_off); + + count = host->sg_ptr->length - host->sg_off; + if (count > data->blksz) + count = data->blksz; + + pr_debug("count: %08x offset: %08x flags %08x\n", + count, host->sg_off, data->flags); + + /* Transfer the data */ + if (data->flags & MMC_DATA_READ) + sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); + else + sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); + + host->sg_off += count; + + tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); + + if (host->sg_off == host->sg_ptr->length) + tmio_mmc_next_sg(host); + + return; +} + +static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) +{ + if (host->sg_ptr == &host->bounce_sg) { + unsigned long flags; + void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); + memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); + tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); + } +} + +/* needs to be called with host->lock held */ +void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) +{ + struct mmc_data *data = host->data; + struct mmc_command *stop; + + host->data = NULL; + + if (!data) { + dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); + return; + } + stop = data->stop; + + /* FIXME - return correct transfer count on errors */ + if (!data->error) + data->bytes_xfered = data->blocks * data->blksz; + else + data->bytes_xfered = 0; + + pr_debug("Completed data request\n"); + + /* + * FIXME: other drivers allow an optional stop command of any given type + * which we dont do, as the chip can auto generate them. + * Perhaps we can be smarter about when to use auto CMD12 and + * only issue the auto request when we know this is the desired + * stop command, allowing fallback to the stop command the + * upper layers expect. For now, we do what works. + */ + + if (data->flags & MMC_DATA_READ) { + if (host->chan_rx && !host->force_pio) + tmio_mmc_check_bounce_buffer(host); + dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", + host->mrq); + } else { + dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", + host->mrq); + } + + if (stop) { + if (stop->opcode == 12 && !stop->arg) + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); + else + BUG(); + } + + tmio_mmc_finish_request(host); +} + +static void tmio_mmc_data_irq(struct tmio_mmc_host *host) +{ + struct mmc_data *data; + spin_lock(&host->lock); + data = host->data; + + if (!data) + goto out; + + if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { + /* + * Has all data been written out yet? Testing on SuperH showed, + * that in most cases the first interrupt comes already with the + * BUSY status bit clear, but on some operations, like mount or + * in the beginning of a write / sync / umount, there is one + * DATAEND interrupt with the BUSY bit set, in this cases + * waiting for one more interrupt fixes the problem. 
+ */ + if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); + tasklet_schedule(&host->dma_complete); + } + } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); + tasklet_schedule(&host->dma_complete); + } else { + tmio_mmc_do_data_irq(host); + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); + } +out: + spin_unlock(&host->lock); +} + +static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, + unsigned int stat) +{ + struct mmc_command *cmd = host->cmd; + int i, addr; + + spin_lock(&host->lock); + + if (!host->cmd) { + pr_debug("Spurious CMD irq\n"); + goto out; + } + + host->cmd = NULL; + + /* This controller is sicker than the PXA one. Not only do we need to + * drop the top 8 bits of the first response word, we also need to + * modify the order of the response for short response command types. + */ + + for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) + cmd->resp[i] = sd_ctrl_read32(host, addr); + + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); + cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); + cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); + cmd->resp[3] <<= 8; + } else if (cmd->flags & MMC_RSP_R3) { + cmd->resp[0] = cmd->resp[3]; + } + + if (stat & TMIO_STAT_CMDTIMEOUT) + cmd->error = -ETIMEDOUT; + else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) + cmd->error = -EILSEQ; + + /* If there is data to handle we enable data IRQs here, and + * we will ultimatley finish the request in the data_end handler. + * If theres no data or we encountered an error, finish now. + */ + if (host->data && !cmd->error) { + if (host->data->flags & MMC_DATA_READ) { + if (host->force_pio || !host->chan_rx) + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); + else + tasklet_schedule(&host->dma_issue); + } else { + if (host->force_pio || !host->chan_tx) + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); + else + tasklet_schedule(&host->dma_issue); + } + } else { + tmio_mmc_finish_request(host); + } + +out: + spin_unlock(&host->lock); +} + +static irqreturn_t tmio_mmc_irq(int irq, void *devid) +{ + struct tmio_mmc_host *host = devid; + struct tmio_mmc_data *pdata = host->pdata; + unsigned int ireg, irq_mask, status; + unsigned int sdio_ireg, sdio_irq_mask, sdio_status; + + pr_debug("MMC IRQ begin\n"); + + status = sd_ctrl_read32(host, CTL_STATUS); + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); + ireg = status & TMIO_MASK_IRQ & ~irq_mask; + + sdio_ireg = 0; + if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { + sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); + sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); + sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; + + sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); + + if (sdio_ireg && !host->sdio_irq_enabled) { + pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", + sdio_status, sdio_irq_mask, sdio_ireg); + tmio_mmc_enable_sdio_irq(host->mmc, 0); + goto out; + } + + if (host->mmc->caps & MMC_CAP_SDIO_IRQ && + sdio_ireg & TMIO_SDIO_STAT_IOIRQ) + mmc_signal_sdio_irq(host->mmc); + + if (sdio_ireg) + goto out; + } + + pr_debug_status(status); + pr_debug_status(ireg); + + if (!ireg) { + tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); + + pr_warning("tmio_mmc: Spurious irq, disabling! 
" + "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); + pr_debug_status(status); + + goto out; + } + + while (ireg) { + /* Card insert / remove attempts */ + if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | + TMIO_STAT_CARD_REMOVE); + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); + } + + /* CRC and other errors */ +/* if (ireg & TMIO_STAT_ERR_IRQ) + * handled |= tmio_error_irq(host, irq, stat); + */ + + /* Command completion */ + if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { + tmio_mmc_ack_mmc_irqs(host, + TMIO_STAT_CMDRESPEND | + TMIO_STAT_CMDTIMEOUT); + tmio_mmc_cmd_irq(host, status); + } + + /* Data transfer */ + if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); + tmio_mmc_pio_irq(host); + } + + /* Data transfer completion */ + if (ireg & TMIO_STAT_DATAEND) { + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); + tmio_mmc_data_irq(host); + } + + /* Check status - keep going until we've handled it all */ + status = sd_ctrl_read32(host, CTL_STATUS); + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); + ireg = status & TMIO_MASK_IRQ & ~irq_mask; + + pr_debug("Status at end of loop: %08x\n", status); + pr_debug_status(status); + } + pr_debug("MMC IRQ end\n"); + +out: + return IRQ_HANDLED; +} + +static int tmio_mmc_start_data(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + struct tmio_mmc_data *pdata = host->pdata; + + pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", + data->blksz, data->blocks); + + /* Some hardware cannot perform 2 byte requests in 4 bit mode */ + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { + int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; + + if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { + pr_err("%s: %d byte block unsupported in 4 bit mode\n", + mmc_hostname(host->mmc), data->blksz); + return -EINVAL; + } + } + + tmio_mmc_init_sg(host, data); + host->data = data; + + /* Set transfer length / blocksize */ + sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + + tmio_mmc_start_dma(host, data); + + return 0; +} + +/* Process requests from the MMC layer */ +static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + int ret; + + if (host->mrq) + pr_debug("request not null\n"); + + host->last_req_ts = jiffies; + wmb(); + host->mrq = mrq; + + if (mrq->data) { + ret = tmio_mmc_start_data(host, mrq->data); + if (ret) + goto fail; + } + + ret = tmio_mmc_start_command(host, mrq->cmd); + if (!ret) { + schedule_delayed_work(&host->delayed_reset_work, + msecs_to_jiffies(2000)); + return; + } + +fail: + host->mrq = NULL; + host->force_pio = false; + mrq->cmd->error = ret; + mmc_request_done(mmc, mrq); +} + +/* Set MMC clock / power. + * Note: This controller uses a simple divider scheme therefore it cannot + * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as + * MMC wont run that fast, it has to be clocked at 12MHz which is the next + * slowest setting. 
+ */ +static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + + if (ios->clock) + tmio_mmc_set_clock(host, ios->clock); + + /* Power sequence - OFF -> UP -> ON */ + if (ios->power_mode == MMC_POWER_UP) { + /* power up SD bus */ + if (host->set_pwr) + host->set_pwr(host->pdev, 1); + } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { + /* power down SD bus */ + if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) + host->set_pwr(host->pdev, 0); + tmio_mmc_clk_stop(host); + } else { + /* start bus clock */ + tmio_mmc_clk_start(host); + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); + break; + case MMC_BUS_WIDTH_4: + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); + break; + } + + /* Let things settle. delay taken from winCE driver */ + udelay(140); +} + +static int tmio_mmc_get_ro(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + + return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || + !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); +} + +static int tmio_mmc_get_cd(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + + if (!pdata->get_cd) + return -ENOSYS; + else + return pdata->get_cd(host->pdev); +} + +static const struct mmc_host_ops tmio_mmc_ops = { + .request = tmio_mmc_request, + .set_ios = tmio_mmc_set_ios, + .get_ro = tmio_mmc_get_ro, + .get_cd = tmio_mmc_get_cd, + .enable_sdio_irq = tmio_mmc_enable_sdio_irq, +}; + +int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, + struct platform_device *pdev, + struct tmio_mmc_data *pdata) +{ + struct tmio_mmc_host *_host; + struct mmc_host *mmc; + struct resource *res_ctl; + int ret; + u32 irq_mask = TMIO_MASK_CMD; + + res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_ctl) + return -EINVAL; + + mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + _host = mmc_priv(mmc); + _host->pdata = pdata; + _host->mmc = mmc; + _host->pdev = pdev; + platform_set_drvdata(pdev, mmc); + + _host->set_pwr = pdata->set_pwr; + _host->set_clk_div = pdata->set_clk_div; + + /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ + _host->bus_shift = resource_size(res_ctl) >> 10; + + _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); + if (!_host->ctl) { + ret = -ENOMEM; + goto host_free; + } + + mmc->ops = &tmio_mmc_ops; + mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; + mmc->f_max = pdata->hclk; + mmc->f_min = mmc->f_max / 512; + mmc->max_segs = 32; + mmc->max_blk_size = 512; + mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * + mmc->max_segs; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + if (pdata->ocr_mask) + mmc->ocr_avail = pdata->ocr_mask; + else + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + + tmio_mmc_clk_stop(_host); + tmio_mmc_reset(_host); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto unmap_ctl; + + _host->irq = ret; + + tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); + if (pdata->flags & TMIO_MMC_SDIO_IRQ) + tmio_mmc_enable_sdio_irq(mmc, 0); + + ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); + if (ret) + goto unmap_ctl; + + spin_lock_init(&_host->lock); + + /* Init delayed work for request timeouts */ + 
INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); + + /* See if we also get DMA */ + tmio_mmc_request_dma(_host, pdata); + + mmc_add_host(mmc); + + /* Unmask the IRQs we want to know about */ + if (!_host->chan_rx) + irq_mask |= TMIO_MASK_READOP; + if (!_host->chan_tx) + irq_mask |= TMIO_MASK_WRITEOP; + + tmio_mmc_enable_mmc_irqs(_host, irq_mask); + + *host = _host; + + return 0; + +unmap_ctl: + iounmap(_host->ctl); +host_free: + mmc_free_host(mmc); + + return ret; +} +EXPORT_SYMBOL(tmio_mmc_host_probe); + +void tmio_mmc_host_remove(struct tmio_mmc_host *host) +{ + mmc_remove_host(host->mmc); + cancel_delayed_work_sync(&host->delayed_reset_work); + tmio_mmc_release_dma(host); + free_irq(host->irq, host); + iounmap(host->ctl); + mmc_free_host(host->mmc); +} +EXPORT_SYMBOL(tmio_mmc_host_remove); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 9ed84ddb4780..4dfe2c02ea91 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -802,12 +802,9 @@ static const struct mmc_host_ops via_sdc_ops = { static void via_reset_pcictrl(struct via_crdr_mmc_host *host) { - void __iomem *addrbase; unsigned long flags; u8 gatt; - addrbase = host->pcictrl_mmiobase; - spin_lock_irqsave(&host->lock, flags); via_save_pcictrlreg(host); @@ -1090,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev, struct mmc_host *mmc; struct via_crdr_mmc_host *sdhost; u32 base, len; - u8 rev, gatt; + u8 gatt; int ret; - pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); pr_info(DRV_NAME ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, - (int)rev); + (int)pcidev->revision); ret = pci_enable_device(pcidev); if (ret) diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 7fca0a386ba0..62e5a4d171e1 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -484,7 +484,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host) /* * Check that we aren't being called after the - * entire buffer has been transfered. + * entire buffer has been transferred. */ if (host->num_sg == 0) return; @@ -828,7 +828,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq) /* * If this is a data transfer the request * will be finished after the data has - * transfered. + * transferred. */ if (cmd->data && !cmd->error) { /* @@ -904,7 +904,7 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) setup &= ~WBSD_DAT3_H; /* - * We cannot resume card detection immediatly + * We cannot resume card detection immediately * because of capacitance and delays in the chip. */ mod_timer(&host->ignore_timer, jiffies + HZ / 100); |
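The register accessors at the top of tmio_mmc_pio.c hide one hardware detail worth noting: the SD control block is a 16-bit register file whose stride differs between variants, so every register index is scaled by host->bus_shift (derived in tmio_mmc_host_probe() from the resource size: a 0x200-byte window means shift 0, 0x400 means shift 1), and 32-bit registers such as CTL_STATUS are accessed as two adjacent 16-bit halves. A minimal sketch of the same addressing scheme, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative mirror of the sd_ctrl_* accessors in this patch; the register
 * index is scaled by bus_shift so the same code serves both the 0x200-byte
 * and the 0x400-byte register windows. */
struct example_host {
	void __iomem *ctl;
	unsigned long bus_shift;	/* 0: 2-byte stride, 1: 4-byte stride */
};

static u16 example_read16(struct example_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static u32 example_read32(struct example_host *host, int addr)
{
	/* 32-bit registers are exposed as two consecutive 16-bit halves */
	return example_read16(host, addr) |
	       (u32)example_read16(host, addr + 2) << 16;
}

static void example_write32(struct example_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}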
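tmio_mmc_start_dma_rx() and _tx() follow the slave-dmaengine sequence of this kernel generation: map the scatterlist for the device, ask the channel to prepare a slave descriptor, submit it, and issue it later (the driver defers dma_async_issue_pending() to the dma_issue tasklet, which the command-completion handler schedules). If any step fails, both channels are released and the request falls back to PIO. A reduced sketch of the receive path under those assumptions, with a hypothetical context structure and the channel-release/PIO fallback left out:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical stand-in for the DMA-related fields of struct tmio_mmc_host */
struct example_dma_ctx {
	struct dma_chan *chan_rx;
	struct scatterlist *sg;
	unsigned int sg_len;
};

/* Prepare and queue one device-to-memory transfer; returns 0 or -errno. */
static int example_start_dma_rx(struct example_dma_ctx *ctx)
{
	struct dma_chan *chan = ctx->chan_rx;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int nents;

	nents = dma_map_sg(chan->device->dev, ctx->sg, ctx->sg_len,
			   DMA_FROM_DEVICE);
	if (nents <= 0)
		return -ENOMEM;

	/* Same call the patch uses: direction and flags passed directly */
	desc = chan->device->device_prep_slave_sg(chan, ctx->sg, nents,
						  DMA_FROM_DEVICE,
						  DMA_CTRL_ACK);
	if (!desc)
		goto unmap;

	cookie = dmaengine_submit(desc);
	if (cookie < 0)
		goto unmap;

	/* The driver defers this to a tasklet; issuing immediately is the
	 * simplest correct form of the same pattern. */
	dma_async_issue_pending(chan);
	return 0;

unmap:
	dma_unmap_sg(chan->device->dev, ctx->sg, ctx->sg_len, DMA_FROM_DEVICE);
	return -EIO;
}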
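tmio_mmc_request_dma() is all-or-nothing: DMA is only enabled when a DMA_SLAVE channel can be obtained for each direction, with the platform's slave configuration handed to the channel through chan->private by the filter callback, plus one DMA-capable page reserved as a bounce buffer for requests that consist of a single unaligned segment (reads copy the bounce buffer back to the original sg in tmio_mmc_check_bounce_buffer()). A condensed sketch with hypothetical names; pdata_tx and pdata_rx stand in for pdata->dma->chan_priv_tx/rx:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

static bool example_dma_filter(struct dma_chan *chan, void *slave_data)
{
	/* Same trick as the patch: pass the slave config via chan->private */
	chan->private = slave_data;
	return true;
}

static int example_request_dma(struct dma_chan **tx, struct dma_chan **rx,
			       u8 **bounce, void *pdata_tx, void *pdata_rx)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	*tx = dma_request_channel(mask, example_dma_filter, pdata_tx);
	if (!*tx)
		return -ENODEV;

	*rx = dma_request_channel(mask, example_dma_filter, pdata_rx);
	if (!*rx)
		goto release_tx;

	/* One DMA-capable page for the unaligned single-segment case */
	*bounce = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!*bounce)
		goto release_rx;

	return 0;

release_rx:
	dma_release_channel(*rx);
	*rx = NULL;
release_tx:
	dma_release_channel(*tx);
	*tx = NULL;
	return -ENOMEM;
}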
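tmio_mmc_cmd_irq() has to post-process command responses: the response words are read into resp[3..0] starting from the lowest register address, and for 136-bit (R2) responses everything is additionally shifted up by one byte, borrowing the top byte of the following word, because (as the in-code comment puts it) the controller drops the top 8 bits of the first response word and the word order needs adjusting. A standalone, user-space model of that re-splicing; raw[] stands for the four CTL_RESPONSE reads in ascending address order:

#include <stdint.h>
#include <stdio.h>

/* Model of the MMC_RSP_136 fix-up performed in tmio_mmc_cmd_irq(). */
static void fixup_r2(const uint32_t raw[4], uint32_t resp[4])
{
	int i;

	/* Reads land in reverse order: lowest address -> resp[3] */
	for (i = 0; i < 4; i++)
		resp[3 - i] = raw[i];

	/* Shift the concatenated words up by one byte, pulling the top byte
	 * of the next word into the freed position. */
	resp[0] = (resp[0] << 8) | (resp[1] >> 24);
	resp[1] = (resp[1] << 8) | (resp[2] >> 24);
	resp[2] = (resp[2] << 8) | (resp[3] >> 24);
	resp[3] <<= 8;
}

int main(void)
{
	const uint32_t raw[4] = { 0x44332211, 0x88776655, 0xccbbaa99, 0x00ffeedd };
	uint32_t resp[4];
	int i;

	fixup_r2(raw, resp);
	for (i = 0; i < 4; i++)
		printf("resp[%d] = %08x\n", i, resp[i]);
	return 0;
}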
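Request timeouts are handled by a soft watchdog rather than per-command timers: tmio_mmc_request() records jiffies and arms delayed_reset_work for two seconds, tmio_mmc_finish_request() cancels it, and tmio_mmc_reset_work() double-checks under the lock that a request is still outstanding and genuinely older than the budget before resetting the controller and failing it. A schematic of that pattern with hypothetical names (INIT_DELAYED_WORK() and spin_lock_init() at setup are assumed):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_watchdog {
	spinlock_t lock;
	unsigned long last_req_ts;
	bool busy;
	struct delayed_work reset_work;
};

static void example_start_request(struct example_watchdog *w)
{
	w->last_req_ts = jiffies;
	w->busy = true;
	schedule_delayed_work(&w->reset_work, msecs_to_jiffies(2000));
}

static void example_finish_request(struct example_watchdog *w)
{
	w->busy = false;
	cancel_delayed_work(&w->reset_work);
}

static void example_reset_work(struct work_struct *work)
{
	struct example_watchdog *w = container_of(work, struct example_watchdog,
						  reset_work.work);
	unsigned long flags;

	spin_lock_irqsave(&w->lock, flags);
	/* Do nothing if the request already finished, or if it was re-armed
	 * recently enough that the two-second budget has not yet expired. */
	if (!w->busy ||
	    time_is_after_jiffies(w->last_req_ts + msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&w->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&w->lock, flags);

	/* ...reset the controller and complete the request with -ETIMEDOUT... */
}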
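The payoff of the split is that the MFD-backed tmio_mmc platform driver rewritten above becomes thin glue around the exported tmio_mmc_host_probe()/tmio_mmc_host_remove() pair, and other glue drivers (the CONFIG_MMC_SDHI guards in tmio_mmc.h point at an SDHI one) can reuse the same core, including the DMA path in tmio_mmc_dma.c. A hypothetical minimal glue driver showing how the exported pair is consumed; the name and structure here are illustrative and not taken from any in-tree driver, and clock handling is left to the set_pwr/set_clk_div callbacks in platform data:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "tmio_mmc.h"

static int __devinit example_glue_probe(struct platform_device *pdev)
{
	struct tmio_mmc_data *pdata = pdev->dev.platform_data;
	struct tmio_mmc_host *host;
	int ret;

	if (!pdata || !pdata->hclk)
		return -EINVAL;

	/* The core maps the first MEM resource, requests the IRQ, sets the
	 * platform drvdata to the mmc_host and registers it with the MMC core. */
	ret = tmio_mmc_host_probe(&host, pdev, pdata);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "%s probed\n", mmc_hostname(host->mmc));
	return 0;
}

static int __devexit example_glue_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (mmc)
		tmio_mmc_host_remove(mmc_priv(mmc));
	return 0;
}

static struct platform_driver example_glue_driver = {
	.driver	= {
		.name	= "example-tmio-glue",	/* hypothetical name */
		.owner	= THIS_MODULE,
	},
	.probe	= example_glue_probe,
	.remove	= __devexit_p(example_glue_remove),
};

/* module_init()/module_exit() calling platform_driver_register()/unregister()
 * are omitted for brevity. */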