Diffstat (limited to 'drivers/mmc/host/sdhci.c')
-rw-r--r--  drivers/mmc/host/sdhci.c  |  994
1 file changed, 531 insertions(+), 463 deletions(-)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b413aa6c246b..17701c3da733 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
-#include <linux/pci.h>
+#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
@@ -32,135 +32,6 @@
static unsigned int debug_quirks = 0;
-/*
- * Different quirks to handle when the hardware deviates from a strict
- * interpretation of the SDHCI specification.
- */
-
-/* Controller doesn't honor resets unless we touch the clock register */
-#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
-/* Controller has bad caps bits, but really supports DMA */
-#define SDHCI_QUIRK_FORCE_DMA (1<<1)
-/* Controller doesn't like to be reset when there is no card inserted. */
-#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
-/* Controller doesn't like clearing the power reg before a change */
-#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
-/* Controller has an unusable DMA engine */
-#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
-/* Controller can only DMA from 32-bit aligned addresses */
-#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6)
-/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7)
-/* Controller needs to be reset after each request to stay stable */
-#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8)
-/* Controller needs voltage and power writes to happen separately */
-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9)
-/* Controller has an off-by-one issue with timeout value */
-#define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10)
-
-static const struct pci_device_id pci_ids[] __devinitdata = {
- {
- .vendor = PCI_VENDOR_ID_RICOH,
- .device = PCI_DEVICE_ID_RICOH_R5C822,
- .subvendor = PCI_VENDOR_ID_IBM,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET |
- SDHCI_QUIRK_FORCE_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_RICOH,
- .device = PCI_DEVICE_ID_RICOH_R5C822,
- .subvendor = PCI_VENDOR_ID_SAMSUNG,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_FORCE_DMA |
- SDHCI_QUIRK_NO_CARD_NO_RESET,
- },
-
- {
- .vendor = PCI_VENDOR_ID_RICOH,
- .device = PCI_DEVICE_ID_RICOH_R5C822,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_FORCE_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_TI,
- .device = PCI_DEVICE_ID_TI_XX21_XX11_SD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_FORCE_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_ENE,
- .device = PCI_DEVICE_ID_ENE_CB712_SD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_BROKEN_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_ENE,
- .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_BROKEN_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_ENE,
- .device = PCI_DEVICE_ID_ENE_CB714_SD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
- SDHCI_QUIRK_BROKEN_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_ENE,
- .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
- SDHCI_QUIRK_BROKEN_DMA,
- },
-
- {
- .vendor = PCI_VENDOR_ID_MARVELL,
- .device = PCI_DEVICE_ID_MARVELL_CAFE_SD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
- SDHCI_QUIRK_INCR_TIMEOUT_CONTROL,
- },
-
- {
- .vendor = PCI_VENDOR_ID_JMICRON,
- .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR |
- SDHCI_QUIRK_32BIT_DMA_SIZE |
- SDHCI_QUIRK_RESET_AFTER_REQUEST,
- },
-
- { /* Generic SD host controller */
- PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
- },
-
- { /* end: all zeroes */ },
-};
-
-MODULE_DEVICE_TABLE(pci, pci_ids);
-
static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);
@@ -215,7 +86,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
unsigned long timeout;
- if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
+ if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
SDHCI_CARD_PRESENT))
return;
@@ -253,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host)
SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
- SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
+ SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+ SDHCI_INT_ADMA_ERROR;
writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
@@ -443,23 +315,226 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
DBG("PIO transfer complete.\n");
}
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
+static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
- u8 count;
- unsigned target_timeout, current_timeout;
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
- WARN_ON(host->data);
+static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+{
+ kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ local_irq_restore(*flags);
+}
- if (data == NULL)
- return;
+static int sdhci_adma_table_pre(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ int direction;
- /* Sanity checks */
- BUG_ON(data->blksz * data->blocks > 524288);
- BUG_ON(data->blksz > host->mmc->max_blk_size);
- BUG_ON(data->blocks > 65535);
+ u8 *desc;
+ u8 *align;
+ dma_addr_t addr;
+ dma_addr_t align_addr;
+ int len, offset;
- host->data = data;
- host->data_early = 0;
+ struct scatterlist *sg;
+ int i;
+ char *buffer;
+ unsigned long flags;
+
+ /*
+ * The spec does not specify the endianness of the descriptor
+ * table. We currently guess that it is little endian.
+ */
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+ /*
+ * The ADMA descriptor table is mapped further down as we
+ * need to fill it with data first.
+ */
+
+ host->align_addr = dma_map_single(mmc_dev(host->mmc),
+ host->align_buffer, 128 * 4, direction);
+ if (dma_mapping_error(host->align_addr))
+ goto fail;
+ BUG_ON(host->align_addr & 0x3);
+
+ host->sg_count = dma_map_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len, direction);
+ if (host->sg_count == 0)
+ goto unmap_align;
+
+ desc = host->adma_desc;
+ align = host->align_buffer;
+
+ align_addr = host->align_addr;
+
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ /*
+ * The SDHCI specification states that ADMA
+ * addresses must be 32-bit aligned. If they
+ * aren't, then we use a bounce buffer for
+ * the (up to three) bytes that screw up the
+ * alignment.
+ */
+ offset = (4 - (addr & 0x3)) & 0x3;
+ if (offset) {
+ if (data->flags & MMC_DATA_WRITE) {
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(align, buffer, offset);
+ sdhci_kunmap_atomic(buffer, &flags);
+ }
+
+ desc[7] = (align_addr >> 24) & 0xff;
+ desc[6] = (align_addr >> 16) & 0xff;
+ desc[5] = (align_addr >> 8) & 0xff;
+ desc[4] = (align_addr >> 0) & 0xff;
+
+ BUG_ON(offset > 65536);
+
+ desc[3] = (offset >> 8) & 0xff;
+ desc[2] = (offset >> 0) & 0xff;
+
+ desc[1] = 0x00;
+ desc[0] = 0x21; /* tran, valid */
+
+ align += 4;
+ align_addr += 4;
+
+ desc += 8;
+
+ addr += offset;
+ len -= offset;
+ }
+
+ desc[7] = (addr >> 24) & 0xff;
+ desc[6] = (addr >> 16) & 0xff;
+ desc[5] = (addr >> 8) & 0xff;
+ desc[4] = (addr >> 0) & 0xff;
+
+ BUG_ON(len > 65536);
+
+ desc[3] = (len >> 8) & 0xff;
+ desc[2] = (len >> 0) & 0xff;
+
+ desc[1] = 0x00;
+ desc[0] = 0x21; /* tran, valid */
+
+ desc += 8;
+
+ /*
+ * If this triggers then we have a calculation bug
+ * somewhere. :/
+ */
+ WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
+ }
+
+ /*
+ * Add a terminating entry.
+ */
+ desc[7] = 0;
+ desc[6] = 0;
+ desc[5] = 0;
+ desc[4] = 0;
+
+ desc[3] = 0;
+ desc[2] = 0;
+
+ desc[1] = 0x00;
+ desc[0] = 0x03; /* nop, end, valid */
+
+ /*
+ * Resync align buffer as we might have changed it.
+ */
+ if (data->flags & MMC_DATA_WRITE) {
+ dma_sync_single_for_device(mmc_dev(host->mmc),
+ host->align_addr, 128 * 4, direction);
+ }
+
+ host->adma_addr = dma_map_single(mmc_dev(host->mmc),
+ host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+ if (dma_mapping_error(host->adma_addr))
+ goto unmap_entries;
+ BUG_ON(host->adma_addr & 0x3);
+
+ return 0;
+
+unmap_entries:
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+unmap_align:
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ 128 * 4, direction);
+fail:
+ return -EINVAL;
+}
+
+static void sdhci_adma_table_post(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ int direction;
+
+ struct scatterlist *sg;
+ int i, size;
+ u8 *align;
+ char *buffer;
+ unsigned long flags;
+
+ if (data->flags & MMC_DATA_READ)
+ direction = DMA_FROM_DEVICE;
+ else
+ direction = DMA_TO_DEVICE;
+
+ dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
+ (128 * 2 + 1) * 4, DMA_TO_DEVICE);
+
+ dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+ 128 * 4, direction);
+
+ if (data->flags & MMC_DATA_READ) {
+ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+
+ align = host->align_buffer;
+
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+ if (sg_dma_address(sg) & 0x3) {
+ size = 4 - (sg_dma_address(sg) & 0x3);
+
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(buffer, align, size);
+ sdhci_kunmap_atomic(buffer, &flags);
+
+ align += 4;
+ }
+ }
+ }
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+}
+
+static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
+{
+ u8 count;
+ unsigned target_timeout, current_timeout;
+
+ /*
+ * If the host controller provides us with an incorrect timeout
+ * value, just skip the check and use 0xE. The hardware may take
+ * longer to time out, but that's much better than having a too-short
+ * timeout value.
+ */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
+ return 0xE;
/* timeout in us */
target_timeout = data->timeout_ns / 1000 +
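The byte stores in sdhci_adma_table_pre() above hand-assemble the 8-byte ADMA2 descriptors. As a reading aid only, not part of the patch, the packing they perform is equivalent to the helper sketched below: the attribute values (0x21 for "tran, valid", 0x03 for "nop, end, valid") and the assumed little-endian layout are taken from the comments in that hunk, while the helper name and signature are invented for illustration and assume the kernel's u8/u16/u32 types.

static void sdhci_adma_write_desc(u8 *desc, u32 addr, u16 len, u8 attr)
{
	/* Bytes 4-7: 32-bit data address, least significant byte first. */
	desc[7] = (addr >> 24) & 0xff;
	desc[6] = (addr >> 16) & 0xff;
	desc[5] = (addr >> 8) & 0xff;
	desc[4] = (addr >> 0) & 0xff;

	/* Bytes 2-3: 16-bit transfer length, least significant byte first. */
	desc[3] = (len >> 8) & 0xff;
	desc[2] = (len >> 0) & 0xff;

	/* Byte 1 is reserved, byte 0 carries the attribute/command bits. */
	desc[1] = 0x00;
	desc[0] = attr;
}

With such a helper, both the bounce-buffer and the aligned-chunk cases above would reduce to a single call, e.g. sdhci_adma_write_desc(desc, addr, len, 0x21), followed by desc += 8.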
@@ -484,52 +559,158 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
break;
}
- /*
- * Compensate for an off-by-one error in the CaFe hardware; otherwise,
- * a too-small count gives us interrupt timeouts.
- */
- if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL))
- count++;
-
if (count >= 0xF) {
printk(KERN_WARNING "%s: Too large timeout requested!\n",
mmc_hostname(host->mmc));
count = 0xE;
}
+ return count;
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
+{
+ u8 count;
+ u8 ctrl;
+ int ret;
+
+ WARN_ON(host->data);
+
+ if (data == NULL)
+ return;
+
+ /* Sanity checks */
+ BUG_ON(data->blksz * data->blocks > 524288);
+ BUG_ON(data->blksz > host->mmc->max_blk_size);
+ BUG_ON(data->blocks > 65535);
+
+ host->data = data;
+ host->data_early = 0;
+
+ count = sdhci_calc_timeout(host, data);
writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
if (host->flags & SDHCI_USE_DMA)
host->flags |= SDHCI_REQ_USE_DMA;
- if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
- (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
- ((data->blksz * data->blocks) & 0x3))) {
- DBG("Reverting to PIO because of transfer size (%d)\n",
- data->blksz * data->blocks);
- host->flags &= ~SDHCI_REQ_USE_DMA;
+ /*
+ * FIXME: This doesn't account for merging when mapping the
+ * scatterlist.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ int broken, i;
+ struct scatterlist *sg;
+
+ broken = 0;
+ if (host->flags & SDHCI_USE_ADMA) {
+ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+ broken = 1;
+ } else {
+ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
+ broken = 1;
+ }
+
+ if (unlikely(broken)) {
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->length & 0x3) {
+ DBG("Reverting to PIO because of "
+ "transfer size (%d)\n",
+ sg->length);
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ break;
+ }
+ }
+ }
}
/*
* The assumption here being that alignment is the same after
* translation to device address space.
*/
- if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
- (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
- (data->sg->offset & 0x3))) {
- DBG("Reverting to PIO because of bad alignment\n");
- host->flags &= ~SDHCI_REQ_USE_DMA;
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ int broken, i;
+ struct scatterlist *sg;
+
+ broken = 0;
+ if (host->flags & SDHCI_USE_ADMA) {
+ /*
+ * As we use 3 byte chunks to work around
+ * alignment problems, we need to check this
+ * quirk.
+ */
+ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
+ broken = 1;
+ } else {
+ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+ broken = 1;
+ }
+
+ if (unlikely(broken)) {
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 0x3) {
+ DBG("Reverting to PIO because of "
+ "bad alignment\n");
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ break;
+ }
+ }
+ }
}
if (host->flags & SDHCI_REQ_USE_DMA) {
- int count;
+ if (host->flags & SDHCI_USE_ADMA) {
+ ret = sdhci_adma_table_pre(host, data);
+ if (ret) {
+ /*
+ * This only happens when someone fed
+ * us an invalid request.
+ */
+ WARN_ON(1);
+ host->flags &= ~SDHCI_USE_DMA;
+ } else {
+ writel(host->adma_addr,
+ host->ioaddr + SDHCI_ADMA_ADDRESS);
+ }
+ } else {
+ int sg_cnt;
+
+ sg_cnt = dma_map_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ if (sg_cnt == 0) {
+ /*
+ * This only happens when someone fed
+ * us an invalid request.
+ */
+ WARN_ON(1);
+ host->flags &= ~SDHCI_USE_DMA;
+ } else {
+ WARN_ON(sg_cnt != 1);
+ writel(sg_dma_address(data->sg),
+ host->ioaddr + SDHCI_DMA_ADDRESS);
+ }
+ }
+ }
- count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
- (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
- BUG_ON(count != 1);
+ /*
+ * Always adjust the DMA selection as some controllers
+ * (e.g. JMicron) can't do PIO properly when the selection
+ * is ADMA.
+ */
+ if (host->version >= SDHCI_SPEC_200) {
+ ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if ((host->flags & SDHCI_REQ_USE_DMA) &&
+ (host->flags & SDHCI_USE_ADMA))
+ ctrl |= SDHCI_CTRL_ADMA32;
+ else
+ ctrl |= SDHCI_CTRL_SDMA;
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+ }
- writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
- } else {
+ if (!(host->flags & SDHCI_REQ_USE_DMA)) {
host->cur_sg = data->sg;
host->num_sg = data->sg_len;
@@ -567,7 +748,6 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
static void sdhci_finish_data(struct sdhci_host *host)
{
struct mmc_data *data;
- u16 blocks;
BUG_ON(!host->data);
@@ -575,25 +755,26 @@ static void sdhci_finish_data(struct sdhci_host *host)
host->data = NULL;
if (host->flags & SDHCI_REQ_USE_DMA) {
- pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
- (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
+ if (host->flags & SDHCI_USE_ADMA)
+ sdhci_adma_table_post(host, data);
+ else {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
}
/*
- * Controller doesn't count down when in single block mode.
+ * The specification states that the block count register must
+ * be updated, but it does not specify at what point in the
+ * data flow. That makes the register entirely useless to read
+ * back so we have to assume that nothing made it to the card
+ * in the event of an error.
*/
- if (data->blocks == 1)
- blocks = (data->error == 0) ? 0 : 1;
+ if (data->error)
+ data->bytes_xfered = 0;
else
- blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
- data->bytes_xfered = data->blksz * (data->blocks - blocks);
-
- if (!data->error && blocks) {
- printk(KERN_ERR "%s: Controller signalled completion even "
- "though there were blocks left.\n",
- mmc_hostname(host->mmc));
- data->error = -EIO;
- }
+ data->bytes_xfered = data->blksz * data->blocks;
if (data->stop) {
/*
@@ -775,7 +956,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
* Spec says that we should clear the power reg before setting
* a new value. Some controllers don't seem to like this though.
*/
- if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
pwr = SDHCI_POWER_ON;
@@ -797,10 +978,10 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
}
/*
- * At least the CaFe chip gets confused if we set the voltage
+ * At least the Marvell CaFe chip gets confused if we set the voltage
* and set turn on power at the same time, so set the voltage first.
*/
- if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
+ if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
writeb(pwr & ~SDHCI_POWER_ON,
host->ioaddr + SDHCI_POWER_CONTROL);
@@ -833,7 +1014,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
- if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
+ if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)
+ || (host->flags & SDHCI_DEVICE_DEAD)) {
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
} else
@@ -853,6 +1035,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
/*
* Reset the chip on each power off.
* Should clear out any weird states.
@@ -888,9 +1073,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* signalling timeout and CRC errors even on CMD0. Resetting
* it on each ios seems to solve the problem.
*/
- if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+ if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -905,7 +1091,10 @@ static int sdhci_get_ro(struct mmc_host *mmc)
spin_lock_irqsave(&host->lock, flags);
- present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ present = 0;
+ else
+ present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
spin_unlock_irqrestore(&host->lock, flags);
@@ -922,6 +1111,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_lock_irqsave(&host->lock, flags);
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
ier = readl(host->ioaddr + SDHCI_INT_ENABLE);
ier &= ~SDHCI_INT_CARD_INT;
@@ -931,6 +1123,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
+out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
@@ -996,13 +1189,14 @@ static void sdhci_tasklet_finish(unsigned long param)
* The controller needs a reset of internal state machines
* upon error conditions.
*/
- if (mrq->cmd->error ||
- (mrq->data && (mrq->data->error ||
- (mrq->data->stop && mrq->data->stop->error))) ||
- (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
+ if (!(host->flags & SDHCI_DEVICE_DEAD) &&
+ (mrq->cmd->error ||
+ (mrq->data && (mrq->data->error ||
+ (mrq->data->stop && mrq->data->stop->error))) ||
+ (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Some controllers need this kick or reset won't work here */
- if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
unsigned int clock;
/* This is to force an update */
@@ -1116,6 +1310,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
host->data->error = -ETIMEDOUT;
else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
host->data->error = -EILSEQ;
+ else if (intmask & SDHCI_INT_ADMA_ERROR)
+ host->data->error = -EIO;
if (host->data->error)
sdhci_finish_data(host);
@@ -1234,218 +1430,167 @@ out:
#ifdef CONFIG_PM
-static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
{
- struct sdhci_chip *chip;
- int i, ret;
-
- chip = pci_get_drvdata(pdev);
- if (!chip)
- return 0;
-
- DBG("Suspending...\n");
-
- for (i = 0;i < chip->num_slots;i++) {
- if (!chip->hosts[i])
- continue;
- ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
- if (ret) {
- for (i--;i >= 0;i--)
- mmc_resume_host(chip->hosts[i]->mmc);
- return ret;
- }
- }
-
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ int ret;
- for (i = 0;i < chip->num_slots;i++) {
- if (!chip->hosts[i])
- continue;
- free_irq(chip->hosts[i]->irq, chip->hosts[i]);
- }
+ ret = mmc_suspend_host(host->mmc, state);
+ if (ret)
+ return ret;
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ free_irq(host->irq, host);
return 0;
}
-static int sdhci_resume (struct pci_dev *pdev)
-{
- struct sdhci_chip *chip;
- int i, ret;
+EXPORT_SYMBOL_GPL(sdhci_suspend_host);
- chip = pci_get_drvdata(pdev);
- if (!chip)
- return 0;
+int sdhci_resume_host(struct sdhci_host *host)
+{
+ int ret;
- DBG("Resuming...\n");
+ if (host->flags & SDHCI_USE_DMA) {
+ if (host->ops->enable_dma)
+ host->ops->enable_dma(host);
+ }
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
+ ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
+ mmc_hostname(host->mmc), host);
if (ret)
return ret;
- for (i = 0;i < chip->num_slots;i++) {
- if (!chip->hosts[i])
- continue;
- if (chip->hosts[i]->flags & SDHCI_USE_DMA)
- pci_set_master(pdev);
- ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
- IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc),
- chip->hosts[i]);
- if (ret)
- return ret;
- sdhci_init(chip->hosts[i]);
- mmiowb();
- ret = mmc_resume_host(chip->hosts[i]->mmc);
- if (ret)
- return ret;
- }
+ sdhci_init(host);
+ mmiowb();
+
+ ret = mmc_resume_host(host->mmc);
+ if (ret)
+ return ret;
return 0;
}
-#else /* CONFIG_PM */
-
-#define sdhci_suspend NULL
-#define sdhci_resume NULL
+EXPORT_SYMBOL_GPL(sdhci_resume_host);
#endif /* CONFIG_PM */
/*****************************************************************************\
* *
- * Device probing/removal *
+ * Device allocation/registration *
* *
\*****************************************************************************/
-static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
+struct sdhci_host *sdhci_alloc_host(struct device *dev,
+ size_t priv_size)
{
- int ret;
- unsigned int version;
- struct sdhci_chip *chip;
struct mmc_host *mmc;
struct sdhci_host *host;
- u8 first_bar;
- unsigned int caps;
-
- chip = pci_get_drvdata(pdev);
- BUG_ON(!chip);
-
- ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
- if (ret)
- return ret;
-
- first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
-
- if (first_bar > 5) {
- printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
- return -ENODEV;
- }
-
- if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
- printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
- return -ENODEV;
- }
-
- if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
- printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
- "You may experience problems.\n");
- }
-
- if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
- printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
- return -ENODEV;
- }
-
- if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
- printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
- return -ENODEV;
- }
+ WARN_ON(dev == NULL);
- mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
if (!mmc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
host = mmc_priv(mmc);
host->mmc = mmc;
- host->chip = chip;
- chip->hosts[slot] = host;
+ return host;
+}
- host->bar = first_bar + slot;
+EXPORT_SYMBOL_GPL(sdhci_alloc_host);
- host->addr = pci_resource_start(pdev, host->bar);
- host->irq = pdev->irq;
+int sdhci_add_host(struct sdhci_host *host)
+{
+ struct mmc_host *mmc;
+ unsigned int caps;
+ int ret;
- DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);
+ WARN_ON(host == NULL);
+ if (host == NULL)
+ return -EINVAL;
- ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc));
- if (ret)
- goto free;
+ mmc = host->mmc;
- host->ioaddr = ioremap_nocache(host->addr,
- pci_resource_len(pdev, host->bar));
- if (!host->ioaddr) {
- ret = -ENOMEM;
- goto release;
- }
+ if (debug_quirks)
+ host->quirks = debug_quirks;
sdhci_reset(host, SDHCI_RESET_ALL);
- version = readw(host->ioaddr + SDHCI_HOST_VERSION);
- version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
- if (version > 1) {
+ host->version = readw(host->ioaddr + SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+ >> SDHCI_SPEC_VER_SHIFT;
+ if (host->version > SDHCI_SPEC_200) {
printk(KERN_ERR "%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
- version);
+ host->version);
}
caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
- if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
+ if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_DMA;
else if (!(caps & SDHCI_CAN_DO_DMA))
DBG("Controller doesn't have DMA capability\n");
else
host->flags |= SDHCI_USE_DMA;
- if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
(host->flags & SDHCI_USE_DMA)) {
DBG("Disabling DMA as it is marked broken\n");
host->flags &= ~SDHCI_USE_DMA;
}
- if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
- (host->flags & SDHCI_USE_DMA)) {
- printk(KERN_WARNING "%s: Will use DMA "
- "mode even though HW doesn't fully "
- "claim to support it.\n", mmc_hostname(mmc));
+ if (host->flags & SDHCI_USE_DMA) {
+ if ((host->version >= SDHCI_SPEC_200) &&
+ (caps & SDHCI_CAN_DO_ADMA2))
+ host->flags |= SDHCI_USE_ADMA;
+ }
+
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
+ (host->flags & SDHCI_USE_ADMA)) {
+ DBG("Disabling ADMA as it is marked broken\n");
+ host->flags &= ~SDHCI_USE_ADMA;
}
if (host->flags & SDHCI_USE_DMA) {
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
- printk(KERN_WARNING "%s: No suitable DMA available. "
- "Falling back to PIO.\n", mmc_hostname(mmc));
- host->flags &= ~SDHCI_USE_DMA;
+ if (host->ops->enable_dma) {
+ if (host->ops->enable_dma(host)) {
+ printk(KERN_WARNING "%s: No suitable DMA "
+ "available. Falling back to PIO.\n",
+ mmc_hostname(mmc));
+ host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
+ }
}
}
- if (host->flags & SDHCI_USE_DMA)
- pci_set_master(pdev);
- else /* XXX: Hack to get MMC layer to avoid highmem */
- pdev->dma_mask = 0;
+ if (host->flags & SDHCI_USE_ADMA) {
+ /*
+ * We need to allocate descriptors for all sg entries
+ * (128) and potentially one alignment transfer for
+ * each of those entries.
+ */
+ host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
+ host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
+ if (!host->adma_desc || !host->align_buffer) {
+ kfree(host->adma_desc);
+ kfree(host->align_buffer);
+ printk(KERN_WARNING "%s: Unable to allocate ADMA "
+ "buffers. Falling back to standard DMA.\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_ADMA;
+ }
+ }
+
+ /* XXX: Hack to get MMC layer to avoid highmem */
+ if (!(host->flags & SDHCI_USE_DMA))
+ mmc_dev(host->mmc)->dma_mask = NULL;
host->max_clk =
(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
if (host->max_clk == 0) {
printk(KERN_ERR "%s: Hardware doesn't specify base clock "
"frequency.\n", mmc_hostname(mmc));
- ret = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
host->max_clk *= 1000000;
@@ -1454,8 +1599,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
if (host->timeout_clk == 0) {
printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
"frequency.\n", mmc_hostname(mmc));
- ret = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
if (caps & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
@@ -1466,7 +1610,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
mmc->ops = &sdhci_ops;
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
if (caps & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1482,20 +1626,22 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
if (mmc->ocr_avail == 0) {
printk(KERN_ERR "%s: Hardware doesn't report any "
"support voltages.\n", mmc_hostname(mmc));
- ret = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
spin_lock_init(&host->lock);
/*
- * Maximum number of segments. Hardware cannot do scatter lists.
+ * Maximum number of segments. Depends on whether the hardware
+ * can do scatter/gather or not.
*/
- if (host->flags & SDHCI_USE_DMA)
+ if (host->flags & SDHCI_USE_ADMA)
+ mmc->max_hw_segs = 128;
+ else if (host->flags & SDHCI_USE_DMA)
mmc->max_hw_segs = 1;
- else
- mmc->max_hw_segs = 16;
- mmc->max_phys_segs = 16;
+ else /* PIO */
+ mmc->max_hw_segs = 128;
+ mmc->max_phys_segs = 128;
/*
* Maximum number of sectors in one transfer. Limited by DMA boundary
@@ -1505,9 +1651,13 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
/*
* Maximum segment size. Could be one segment with the maximum number
- * of bytes.
+ * of bytes. When doing hardware scatter/gather, each entry cannot
+ * be larger than 64 KiB though.
*/
- mmc->max_seg_size = mmc->max_req_size;
+ if (host->flags & SDHCI_USE_ADMA)
+ mmc->max_seg_size = 65536;
+ else
+ mmc->max_seg_size = mmc->max_req_size;
/*
* Maximum block size. This varies from controller to controller and
@@ -1553,7 +1703,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
host->led.default_trigger = mmc_hostname(mmc);
host->led.brightness_set = sdhci_led_control;
- ret = led_classdev_register(&pdev->dev, &host->led);
+ ret = led_classdev_register(mmc_dev(mmc), &host->led);
if (ret)
goto reset;
#endif
@@ -1562,8 +1712,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
mmc_add_host(mmc);
- printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n",
- mmc_hostname(mmc), host->addr, host->irq,
+ printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
+ mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id,
+ (host->flags & SDHCI_USE_ADMA)?"A":"",
(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
return 0;
@@ -1576,35 +1727,40 @@ reset:
untasklet:
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
-unmap:
- iounmap(host->ioaddr);
-release:
- pci_release_region(pdev, host->bar);
-free:
- mmc_free_host(mmc);
return ret;
}
-static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
+EXPORT_SYMBOL_GPL(sdhci_add_host);
+
+void sdhci_remove_host(struct sdhci_host *host, int dead)
{
- struct sdhci_chip *chip;
- struct mmc_host *mmc;
- struct sdhci_host *host;
+ unsigned long flags;
- chip = pci_get_drvdata(pdev);
- host = chip->hosts[slot];
- mmc = host->mmc;
+ if (dead) {
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->flags |= SDHCI_DEVICE_DEAD;
+
+ if (host->mrq) {
+ printk(KERN_ERR "%s: Controller removed during "
+ " transfer!\n", mmc_hostname(host->mmc));
- chip->hosts[slot] = NULL;
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
- mmc_remove_host(mmc);
+ mmc_remove_host(host->mmc);
#ifdef CONFIG_LEDS_CLASS
led_classdev_unregister(&host->led);
#endif
- sdhci_reset(host, SDHCI_RESET_ALL);
+ if (!dead)
+ sdhci_reset(host, SDHCI_RESET_ALL);
free_irq(host->irq, host);
@@ -1613,106 +1769,21 @@ static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
- iounmap(host->ioaddr);
-
- pci_release_region(pdev, host->bar);
+ kfree(host->adma_desc);
+ kfree(host->align_buffer);
- mmc_free_host(mmc);
+ host->adma_desc = NULL;
+ host->align_buffer = NULL;
}
-static int __devinit sdhci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int ret, i;
- u8 slots, rev;
- struct sdhci_chip *chip;
-
- BUG_ON(pdev == NULL);
- BUG_ON(ent == NULL);
+EXPORT_SYMBOL_GPL(sdhci_remove_host);
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
-
- printk(KERN_INFO DRIVER_NAME
- ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
- pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
- (int)rev);
-
- ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
- if (ret)
- return ret;
-
- slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
- DBG("found %d slot(s)\n", slots);
- if (slots == 0)
- return -ENODEV;
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- chip = kzalloc(sizeof(struct sdhci_chip) +
- sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
- if (!chip) {
- ret = -ENOMEM;
- goto err;
- }
-
- chip->pdev = pdev;
- chip->quirks = ent->driver_data;
-
- if (debug_quirks)
- chip->quirks = debug_quirks;
-
- chip->num_slots = slots;
- pci_set_drvdata(pdev, chip);
-
- for (i = 0;i < slots;i++) {
- ret = sdhci_probe_slot(pdev, i);
- if (ret) {
- for (i--;i >= 0;i--)
- sdhci_remove_slot(pdev, i);
- goto free;
- }
- }
-
- return 0;
-
-free:
- pci_set_drvdata(pdev, NULL);
- kfree(chip);
-
-err:
- pci_disable_device(pdev);
- return ret;
-}
-
-static void __devexit sdhci_remove(struct pci_dev *pdev)
+void sdhci_free_host(struct sdhci_host *host)
{
- int i;
- struct sdhci_chip *chip;
-
- chip = pci_get_drvdata(pdev);
-
- if (chip) {
- for (i = 0;i < chip->num_slots;i++)
- sdhci_remove_slot(pdev, i);
-
- pci_set_drvdata(pdev, NULL);
-
- kfree(chip);
- }
-
- pci_disable_device(pdev);
+ mmc_free_host(host->mmc);
}
-static struct pci_driver sdhci_driver = {
- .name = DRIVER_NAME,
- .id_table = pci_ids,
- .probe = sdhci_probe,
- .remove = __devexit_p(sdhci_remove),
- .suspend = sdhci_suspend,
- .resume = sdhci_resume,
-};
+EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
* *
@@ -1726,14 +1797,11 @@ static int __init sdhci_drv_init(void)
": Secure Digital Host Controller Interface driver\n");
printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
- return pci_register_driver(&sdhci_driver);
+ return 0;
}
static void __exit sdhci_drv_exit(void)
{
- DBG("Exiting\n");
-
- pci_unregister_driver(&sdhci_driver);
}
module_init(sdhci_drv_init);
@@ -1742,7 +1810,7 @@ module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
-MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
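With this change sdhci.c no longer registers a PCI driver of its own; it becomes a bus-agnostic core that exports sdhci_alloc_host(), sdhci_add_host(), sdhci_remove_host() and sdhci_free_host(), plus sdhci_suspend_host() and sdhci_resume_host() under CONFIG_PM, for bus-specific glue drivers to use. The sketch below shows the intended call sequence from a hypothetical, heavily simplified platform glue driver. Only the sdhci_host fields and core entry points visible in this diff are used (ioaddr, irq, hw_name, quirks, ops with its optional enable_dma hook); the example_* names, the choice of the platform bus, and the omitted resource/IRQ error checks and driver registration are illustrative assumptions, and the sdhci_ops structure itself is declared in sdhci.h, which is not part of this diff.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include "sdhci.h"

/* No callbacks needed for this sketch; .enable_dma is optional. */
static struct sdhci_ops example_sdhci_ops;

static int example_sdhci_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct sdhci_host *host;
	int ret;

	/* Allocate mmc_host + sdhci_host; 0 bytes of private driver data. */
	host = sdhci_alloc_host(&pdev->dev, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->hw_name = "example-sdhci";
	host->ops = &example_sdhci_ops;
	host->quirks = 0;
	host->irq = platform_get_irq(pdev, 0);
	host->ioaddr = ioremap(res->start, res->end - res->start + 1);

	/* Reads the capabilities, registers with the MMC layer and claims host->irq. */
	ret = sdhci_add_host(host);
	if (ret) {
		iounmap(host->ioaddr);
		sdhci_free_host(host);
		return ret;
	}

	platform_set_drvdata(pdev, host);
	return 0;
}

static int example_sdhci_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);

	/* dead = 0: the controller still responds, so the core resets it and frees the IRQ. */
	sdhci_remove_host(host, 0);
	iounmap(host->ioaddr);
	sdhci_free_host(host);
	return 0;
}

A glue driver's own suspend and resume callbacks would similarly reduce to calls to sdhci_suspend_host() and sdhci_resume_host(), which is the pattern the new sdhci-pci glue follows.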