author    Adrian Hunter <adrian.hunter@intel.com>    2017-09-22 15:36:52 +0300
committer Ulf Hansson <ulf.hansson@linaro.org>       2017-10-30 11:45:50 +0100
commit    72a5af554df837e373efb0d6c8fc68c568f9a7ac (patch)
tree      934bda00bbed4f8359c1f93ee1dad3ec0a5b587f /drivers/mmc/core/core.c
parent    6c0cedd1ef9527ef13e66875746570e76a3188a7 (diff)
mmc: core: Add support for handling CQE requests
Add core support for handling CQE requests, including starting, completing
and recovering.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
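To make the flow concrete, here is a minimal sketch of how a consumer of the
new API (for example, a queue-aware block driver) might submit one CQE
transfer and handle its completion. The core calls shown (mmc_cqe_start_req(),
mmc_cqe_post_req(), mmc_cqe_recovery(), and mmc_cqe_request_done() reaching the
caller through mrq->done()) are the ones added by this patch; everything
prefixed "my_" is hypothetical, and mrq->data is assumed to have been prepared
by the caller (sg list, blk_addr, block count) before submission.

        #include <linux/completion.h>
        #include <linux/kernel.h>
        #include <linux/mmc/core.h>
        #include <linux/mmc/host.h>

        struct my_cqe_req {
                struct mmc_request mrq;   /* mrq.data must already describe the transfer */
                struct completion comp;
        };

        /* Invoked through mrq->done() from mmc_cqe_request_done() */
        static void my_cqe_done(struct mmc_request *mrq)
        {
                struct my_cqe_req *r = container_of(mrq, struct my_cqe_req, mrq);

                complete(&r->comp);
        }

        static int my_cqe_transfer(struct mmc_host *host, struct my_cqe_req *r, int tag)
        {
                int err;

                r->mrq.tag = tag;               /* CQE task tag for this transfer */
                r->mrq.done = my_cqe_done;      /* completion callback */
                init_completion(&r->comp);

                /* Returns an error if the request fails to start, -EBUSY if CQE is busy */
                err = mmc_cqe_start_req(host, &r->mrq);
                if (err)
                        return err;

                wait_for_completion(&r->comp);

                /* Let the host controller release per-request resources */
                mmc_cqe_post_req(host, &r->mrq);

                /* On a transfer error, discard the queue in both eMMC and CQE */
                if (r->mrq.data && r->mrq.data->error)
                        err = mmc_cqe_recovery(host);

                return err;
        }

A real consumer would submit asynchronously rather than block on a completion,
and, per the comment in mmc_cqe_start_req() below, must hold re-tuning for as
long as the CQE is in use.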
Diffstat (limited to 'drivers/mmc/core/core.c')
-rw-r--r--	drivers/mmc/core/core.c	163
1 file changed, 158 insertions, 5 deletions
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b997cf92ce6c..2ff614d4ffac 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
}
if (mrq->cmd) {
- pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
- mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
- mrq->cmd->flags);
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
- mmc_mrq_pr_debug(host, mrq);
+ mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
@@ -482,6 +486,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning first if needed and possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue in
+ * eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected seldom, if at all, but it reduces performance,
+ * so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_STOP_TRANSMISSION,
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
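For the other side of the interface, a rough sketch of how a host controller
driver might plug into these helpers, assuming struct mmc_cqe_ops (reached as
host->cqe_ops above) provides the callbacks this patch invokes: cqe_request(),
cqe_post_req(), cqe_recovery_start() and cqe_recovery_finish(). All names
prefixed "my_" are hypothetical placeholders, not part of this patch.

        static int my_cqe_request(struct mmc_host *host, struct mmc_request *mrq)
        {
                /*
                 * Program a task descriptor for mrq->tag and start the transfer.
                 * Returning an error makes mmc_cqe_start_req() fail; -EBUSY would
                 * signal that the engine cannot accept the task right now.
                 */
                return 0;
        }

        /* Called from the controller's completion interrupt (hypothetical) */
        static void my_cqe_task_complete(struct mmc_host *host, struct mmc_request *mrq)
        {
                /*
                 * Fill in mrq->data->bytes_xfered and any ->error from hardware
                 * status, then hand the request back to the core, which flags
                 * re-tuning on CRC errors and invokes mrq->done().
                 */
                mmc_cqe_request_done(host, mrq);
        }

        static const struct mmc_cqe_ops my_cqe_ops = {
                .cqe_request    = my_cqe_request,
                /* .cqe_post_req, .cqe_recovery_start, .cqe_recovery_finish, ... */
        };

During error handling, mmc_cqe_recovery() brackets the MMC_STOP_TRANSMISSION /
MMC_CMDQ_TASK_MGMT sequence with the driver's cqe_recovery_start() and
cqe_recovery_finish() callbacks, and the driver is expected to complete every
outstanding request through mmc_cqe_request_done() in between.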