From ec5a19dd935eb3793e1f6ed491e8035b3d7b1df9 Mon Sep 17 00:00:00 2001
From: Pierre Ossman
Date: Fri, 6 Oct 2006 00:44:03 -0700
Subject: [PATCH] mmc: multi sector write transfers

SD cards extend the protocol by allowing the host to query a card how
many blocks were successfully stored on the medium. This allows us to
safely write chunks of blocks at once.

Signed-off-by: Pierre Ossman
Cc: Russell King
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/mmc/mmc_block.c | 104 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 95 insertions(+), 9 deletions(-)

diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index c1293f1bda87..f9027c8db792 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -28,6 +28,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -154,6 +155,71 @@ static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
         return stat;
 }
 
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+        int err;
+        u32 blocks;
+
+        struct mmc_request mrq;
+        struct mmc_command cmd;
+        struct mmc_data data;
+        unsigned int timeout_us;
+
+        struct scatterlist sg;
+
+        memset(&cmd, 0, sizeof(struct mmc_command));
+
+        cmd.opcode = MMC_APP_CMD;
+        cmd.arg = card->rca << 16;
+        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+        err = mmc_wait_for_cmd(card->host, &cmd, 0);
+        if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD))
+                return (u32)-1;
+
+        memset(&cmd, 0, sizeof(struct mmc_command));
+
+        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+        cmd.arg = 0;
+        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+        memset(&data, 0, sizeof(struct mmc_data));
+
+        data.timeout_ns = card->csd.tacc_ns * 100;
+        data.timeout_clks = card->csd.tacc_clks * 100;
+
+        timeout_us = data.timeout_ns / 1000;
+        timeout_us += data.timeout_clks * 1000 /
+                (card->host->ios.clock / 1000);
+
+        if (timeout_us > 100000) {
+                data.timeout_ns = 100000000;
+                data.timeout_clks = 0;
+        }
+
+        data.blksz = 4;
+        data.blocks = 1;
+        data.flags = MMC_DATA_READ;
+        data.sg = &sg;
+        data.sg_len = 1;
+
+        memset(&mrq, 0, sizeof(struct mmc_request));
+
+        mrq.cmd = &cmd;
+        mrq.data = &data;
+
+        sg_init_one(&sg, &blocks, 4);
+
+        mmc_wait_for_req(card->host, &mrq);
+
+        if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
+                return (u32)-1;
+
+        blocks = ntohl(blocks);
+
+        return blocks;
+}
+
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
         struct mmc_blk_data *md = mq->data;
@@ -184,10 +250,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
                 /*
                  * If the host doesn't support multiple block writes, force
-                 * block writes to single block.
+                 * block writes to single block. SD cards are excepted from
+                 * this rule as they support querying the number of
+                 * successfully written sectors.
                  */
                 if (rq_data_dir(req) != READ &&
-                    !(card->host->caps & MMC_CAP_MULTIWRITE))
+                    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
+                    !mmc_card_sd(card))
                         brq.data.blocks = 1;
 
                 if (brq.data.blocks > 1) {
@@ -276,24 +345,41 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
         return 1;
 
  cmd_err:
-        mmc_card_release_host(card);
-
         ret = 1;
 
-        /*
-         * For writes and where the host claims to support proper
-         * error reporting, we first ok the successful blocks.
+        /*
+         * If this is an SD card and we're writing, we can first
+         * mark the known good sectors as ok.
+         *
+         * If the card is not SD, we can still ok written sectors
+         * if the controller can do proper error reporting.
         *
         * For reads we just fail the entire chunk as that should
         * be safe in all cases.
         */
-        if (rq_data_dir(req) != READ &&
-            (card->host->caps & MMC_CAP_MULTIWRITE)) {
+        if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
+                u32 blocks;
+                unsigned int bytes;
+
+                blocks = mmc_sd_num_wr_blocks(card);
+                if (blocks != (u32)-1) {
+                        if (card->csd.write_partial)
+                                bytes = blocks << md->block_bits;
+                        else
+                                bytes = blocks << 9;
+                        spin_lock_irq(&md->lock);
+                        ret = end_that_request_chunk(req, 1, bytes);
+                        spin_unlock_irq(&md->lock);
+                }
+        } else if (rq_data_dir(req) != READ &&
+                   (card->host->caps & MMC_CAP_MULTIWRITE)) {
                 spin_lock_irq(&md->lock);
                 ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
                 spin_unlock_irq(&md->lock);
         }
 
+        mmc_card_release_host(card);
+
         spin_lock_irq(&md->lock);
         while (ret) {
                 ret = end_that_request_chunk(req, 0,
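
For readers tracing the error-recovery path above, here is a minimal standalone sketch
(ordinary userspace C, not part of the patch) of how the block count returned by
SD_APP_SEND_NUM_WR_BLKS (ACMD22) is converted into a byte count that can be handed back
to the block layer. The helper name and the main() harness are hypothetical; only the
byte-order handling and the write_partial / 512-byte shift mirror the driver logic in
the hunks above.

/*
 * Hypothetical illustration only: mirrors the conversion done in the
 * cmd_err path of mmc_blk_issue_rq() after mmc_sd_num_wr_blocks().
 */
#include <stdint.h>
#include <arpa/inet.h>  /* ntohl(): the card reports the counter big-endian */

static uint32_t sd_wr_blocks_to_bytes(uint32_t wire_count,
                                      int write_partial,
                                      unsigned int block_bits)
{
        /* The 32-bit block counter arrives over the data lines in network order. */
        uint32_t blocks = ntohl(wire_count);

        /*
         * Cards that support partial writes can complete in units of the
         * request's block size; otherwise assume standard 512-byte sectors.
         */
        return write_partial ? blocks << block_bits : blocks << 9;
}

int main(void)
{
        /* e.g. the card reports 8 blocks written before the error, 512B sectors */
        uint32_t bytes = sd_wr_blocks_to_bytes(htonl(8), 0, 9);
        return bytes == 4096 ? 0 : 1;
}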