Date: 2008-10-16
From: Adrian Hunter <ext-adrian.hunter@nokia.com>
Subject: [PATCH 2/2] mmc_block: ensure all sectors that do not have errors are read
If a card encounters an ECC error while reading a sector, it will
time out. Instead of failing the whole I/O request, redo the I/O
one sector at a time so that all readable sectors are still
provided to the upper layers.
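
To illustrate the strategy outside the MMC stack, here is a minimal
user-space sketch of the same idea; toy_read(), read_best_effort(),
BAD_SECTOR and SECTOR_SIZE are invented for this example and are not
part of the patch:

/* Illustrative sketch only: simulate a device where one sector has an
 * ECC error, and fall back to single-sector reads when a multi-sector
 * read times out, so that only the bad sector is lost.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE	512
#define BAD_SECTOR	5UL	/* pretend this sector has an ECC error */

/* Simulated read: 0 on success, -ETIMEDOUT if the range covers BAD_SECTOR */
static int toy_read(unsigned long start, unsigned int count, char *buf)
{
	if (start <= BAD_SECTOR && BAD_SECTOR < start + count)
		return -ETIMEDOUT;
	memset(buf, 0xA5, (size_t)count * SECTOR_SIZE);
	return 0;
}

/* Returns the number of sectors read successfully; failed sectors are zeroed */
static unsigned int read_best_effort(unsigned long start, unsigned int count,
				     char *buf)
{
	unsigned int i, good = 0;

	/* Fast path: one multi-sector transfer */
	if (toy_read(start, count, buf) == 0)
		return count;

	/* Slow path: redo the read one sector at a time */
	for (i = 0; i < count; i++) {
		char *p = buf + (size_t)i * SECTOR_SIZE;

		if (toy_read(start + i, 1, p) == 0)
			good++;
		else
			memset(p, 0, SECTOR_SIZE);	/* only this sector is lost */
	}
	return good;
}

int main(void)
{
	static char buf[8 * SECTOR_SIZE];
	unsigned int good = read_best_effort(0, 8, buf);

	printf("%u of 8 sectors read\n", good);	/* prints "7 of 8 sectors read" */
	return 0;
}

The patch does the equivalent inside mmc_blk_issue_rq(): on -ETIMEDOUT
during a multi-block read it sets disable_multi and lets the existing
do/while loop re-issue the remaining sectors one at a time, reporting
-EIO only for the sector that still fails.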

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
---
drivers/mmc/card/block.c | 32 ++++++++++++++++++++++++++------
1 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d121462..0566aae 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -256,13 +256,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1;
+	int ret = 1, disable_multi = 0;
 
 	mmc_claim_host(card->host);
 
 	do {
 		struct mmc_command cmd;
 		u32 readcmd, writecmd;
+		int multi, err;
 
 		memset(&brq, 0, sizeof(struct mmc_blk_request));
 		brq.mrq.cmd = &brq.cmd;
@@ -278,6 +279,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors;
 
+		if (disable_multi && brq.data.blocks > 1)
+			brq.data.blocks = 1;
+
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
@@ -287,10 +291,12 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+			multi = 1;
 		} else {
 			brq.mrq.stop = NULL;
 			readcmd = MMC_READ_SINGLE_BLOCK;
 			writecmd = MMC_WRITE_BLOCK;
+			multi = 0;
 		}
 
 		if (rq_data_dir(req) == READ) {
@@ -312,6 +318,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)

 		mmc_queue_bounce_post(mq);
 
+		if (multi && rq_data_dir(req) == READ &&
+		    brq.data.error == -ETIMEDOUT) {
+			/* Redo read one sector at a time */
+			disable_multi = 1;
+			continue;
+		}
+
 		/*
 		 * Check for errors here, but don't jump to cmd_err
 		 * until later as we need to wait for the card to leave
@@ -360,14 +373,21 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 #endif
 		}
 
-		if (brq.cmd.error || brq.data.error || brq.stop.error)
+		if (brq.cmd.error || brq.stop.error)
 			goto cmd_err;
 
-		/*
-		 * A block was successfully transferred.
-		 */
+		if (brq.data.error) {
+			if (brq.data.error == -ETIMEDOUT &&
+			    rq_data_dir(req) == READ) {
+				err = -EIO;
+				brq.data.bytes_xfered = brq.data.blksz;
+			} else
+				goto cmd_err;
+		} else
+			err = 0;
+
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, err, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	} while (ret);

--
1.5.4.3
