author     Douglas Anderson <dianders@chromium.org>    2017-10-12 13:11:18 -0700
committer  Ulf Hansson <ulf.hansson@linaro.org>        2017-11-02 15:20:41 +0100
commit     93c23ae385299f889606b42507b12b40e50d6088 (patch)
tree       ab9222986faed046208bb655bcabd8abd1a1f54d /drivers/mmc
parent     eaaffcefaa8015aaf496590707ddfa70905e0001 (diff)
mmc: dw_mmc: Cleanup the DTO timer like the CTO one
The recent CTO timer introduced in commit 03de19212ea3 ("mmc: dw_mmc: introduce timer for broken command transfer over scheme") was causing observable problems due to race conditions. Previous patches have fixed those race conditions.

It can be observed that these same race conditions ought to be theoretically possible with the DTO timer too, though they are massively less likely to happen because the data timeout is always set to 0xffffff right now. That means even at a 200 MHz card clock we were arming the DTO timer for 94 ms:

  >>> (0xffffff * 1000. / 200000000) + 10
  93.886075

We also were always setting the DTO timer _after_ starting the transfer, unlike how the old code was setting the CTO timer.

In any case, even though the DTO timer is much less likely to have races, it still makes sense to add code to handle it _just in case_.

Signed-off-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
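[Editorial note] The 94 ms figure above is simply the TMOUT data-timeout field (0xffffff card clocks) converted to milliseconds at the card clock rate, plus the 10 ms of slack added in dw_mci_set_drto(). A minimal standalone C sketch of that arithmetic follows; the 200 MHz clock and the divisor of 1 are assumptions taken from the example in the message, not values read from real hardware:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Values assumed for illustration: the data-timeout field of
             * TMOUT is 0xffffff, and the card clock is 200 MHz with no
             * extra divider (CLKDIV == 0, so the divisor falls back to 1). */
            uint64_t drto_clks = 0xffffff;   /* TMOUT >> 8 */
            uint64_t drto_div  = 1;          /* (CLKDIV & 0xff) * 2, minimum 1 */
            uint64_t bus_hz    = 200000000;  /* 200 MHz card clock */

            /* Round-up conversion to milliseconds, plus the 10 ms of spare
             * time that dw_mci_set_drto() adds before arming the timer. */
            uint64_t drto_ms = (1000 * drto_clks * drto_div + bus_hz - 1) / bus_hz;
            drto_ms += 10;

            printf("DTO timer armed for ~%llu ms\n",
                   (unsigned long long)drto_ms);   /* prints 94 */
            return 0;
    }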
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/dw_mmc.c  55
1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index c365742d6b7f..37b55b095daf 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1938,6 +1938,7 @@ static void dw_mci_set_drto(struct dw_mci *host)
unsigned int drto_clks;
unsigned int drto_div;
unsigned int drto_ms;
+ unsigned long irqflags;
drto_clks = mci_readl(host, TMOUT) >> 8;
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
@@ -1949,7 +1950,11 @@ static void dw_mci_set_drto(struct dw_mci *host)
/* add a bit of spare time */
drto_ms += 10;
- mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ mod_timer(&host->dto_timer,
+ jiffies + msecs_to_jiffies(drto_ms));
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
@@ -1970,6 +1975,18 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
return true;
}
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ return false;
+
+ /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+ WARN_ON(del_timer_sync(&host->dto_timer));
+ clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+ return true;
+}
+
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
@@ -2111,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!dw_mci_clear_pending_data_complete(host)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
@@ -2682,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2694,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_RXDR) {
@@ -3043,7 +3063,31 @@ exit:
static void dw_mci_dto_timer(unsigned long arg)
{
struct dw_mci *host = (struct dw_mci *)arg;
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ /*
+ * The DTO timer is much longer than the CTO timer, so it's even less
+ * likely that we'll hit these cases, but it pays to be paranoid.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & SDMMC_INT_DATA_OVER) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected data interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "DTO timeout when already completed\n");
+ goto exit;
+ }
+
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
@@ -3058,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
break;
default:
+ dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+ host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
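[Editorial note] For readers following the locking change above: the completion path (the SDMMC_INT_DATA_OVER branch of the interrupt handler) and the timer-arming path (dw_mci_set_drto()) now take the same irq_lock, and the timer is only armed if completion has not already been recorded; the tasklet side then uses del_timer_sync() when it consumes EVENT_DATA_COMPLETE. The following is a hypothetical userspace analogue of that pattern, not kernel code; the names set_drto(), data_over_irq(), data_complete and dto_timer_armed are illustrative stand-ins:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool data_complete;    /* stands in for EVENT_DATA_COMPLETE   */
    static bool dto_timer_armed;  /* stands in for host->dto_timer state */

    /* Analogue of dw_mci_set_drto(): only arm the timer if the data
     * transfer has not already been reported complete. */
    static void set_drto(void)
    {
            pthread_mutex_lock(&irq_lock);
            if (!data_complete)
                    dto_timer_armed = true;
            pthread_mutex_unlock(&irq_lock);
    }

    /* Analogue of the SDMMC_INT_DATA_OVER branch of the interrupt
     * handler: cancel the timer and mark completion under the lock. */
    static void data_over_irq(void)
    {
            pthread_mutex_lock(&irq_lock);
            dto_timer_armed = false;
            data_complete = true;
            pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
            /* Completion racing ahead of the timer arming: because both
             * paths serialize on the same lock and set_drto() checks the
             * completion flag, no stale timer is ever left armed. */
            data_over_irq();
            set_drto();
            printf("timer armed after early completion: %s\n",
                   dto_timer_armed ? "yes (bug)" : "no (ok)");
            return 0;
    }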