author     Per Forlin <per.forlin@linaro.org>    2011-07-01 20:55:31 +0400
committer  Chris Ball <cjb@laptop.org>           2011-07-21 01:21:15 +0400
commit     04296b7bfda45295a568b4b312e03828fae801dc (patch)
tree       d7e61107bab3cb25d3f881506056ba2754eb2284 /drivers/mmc/card
parent     d78d4a8ad53f345dd3c0bb5f8d377baa523739f7 (diff)
download   linux-04296b7bfda45295a568b4b312e03828fae801dc.tar.xz
mmc: queue: add a second mmc queue request member
Add an additional mmc queue request instance to make way for two active
block requests. One request may be active while the other request is
being prepared.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
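Note that this patch only prepares the ground: it adds the second mmc_queue_req slot and its allocation and cleanup, while the request handling that actually overlaps the two slots is left to follow-up changes, as the "make way for" wording above suggests. As a rough illustration of the intent, here is a small standalone C sketch; the names sketch_queue, sketch_queue_req and issue() are invented for the example, and the swap-on-completion behaviour is only an assumption about how the two slots are meant to be used, not the kernel's actual request handling.

#include <stdio.h>

/*
 * Toy model of the double-buffering idea, not the kernel implementation:
 * one slot holds the request currently being served, the other slot is
 * free so that the next request can be prepared while the first one runs.
 */
struct sketch_queue_req {
        int id;                         /* stands in for sg lists, bounce buffers, ... */
};

struct sketch_queue {
        struct sketch_queue_req mqrq[2];        /* two slots, as in the patch */
        struct sketch_queue_req *mqrq_cur;      /* request being served */
        struct sketch_queue_req *mqrq_prev;     /* slot free for preparation */
};

static void issue(struct sketch_queue *q, int id)
{
        struct sketch_queue_req *tmp;

        /* Prepare the incoming request in the idle slot ... */
        q->mqrq_prev->id = id;

        /* ... while the request in the mqrq_cur slot is still active. */
        printf("serving req %d, preparing req %d\n",
               q->mqrq_cur->id, q->mqrq_prev->id);

        /* Once the active request completes, the two roles swap. */
        tmp = q->mqrq_cur;
        q->mqrq_cur = q->mqrq_prev;
        q->mqrq_prev = tmp;
}

int main(void)
{
        struct sketch_queue q;
        int id;

        q.mqrq[0].id = 0;               /* 0 = "no request yet" */
        q.mqrq[1].id = 0;
        q.mqrq_cur = &q.mqrq[0];
        q.mqrq_prev = &q.mqrq[1];

        for (id = 1; id <= 4; id++)
                issue(&q, id);
        return 0;
}

The point of carrying both slots in struct mmc_queue is that mmc_init_queue() can allocate scatterlists and bounce buffers for both of them once, so exchanging the roles at runtime needs no further allocation.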
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/queue.c | 44
-rw-r--r--  drivers/mmc/card/queue.h |  3
2 files changed, 44 insertions, 3 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9122ff5f39c8..a38d310f5030 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -153,6 +153,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         u64 limit = BLK_BOUNCE_HIGH;
         int ret;
         struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

         if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                 limit = *mmc_dev(host)->dma_mask;
@@ -163,7 +164,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                 return -ENOMEM;

         memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+        memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
         mq->mqrq_cur = mqrq_cur;
+        mq->mqrq_prev = mqrq_prev;
         mq->queue->queuedata = mq;

         blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -191,9 +194,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
"allocate bounce cur buffer\n",
mmc_card_name(card));
}
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+ "allocate bounce prev buffer\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+ }
}
- if (mqrq_cur->bounce_buf) {
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
@@ -208,11 +219,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                         if (ret)
                                 goto cleanup_queue;

+                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+                        if (ret)
+                                goto cleanup_queue;
+
+                        mqrq_prev->bounce_sg =
+                                mmc_alloc_sg(bouncesz / 512, &ret);
+                        if (ret)
+                                goto cleanup_queue;
                 }
         }
 #endif

-        if (!mqrq_cur->bounce_buf) {
+        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                 blk_queue_bounce_limit(mq->queue, limit);
                 blk_queue_max_hw_sectors(mq->queue,
                         min(host->max_blk_count, host->max_req_size / 512));
@@ -223,6 +242,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                 mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                 if (ret)
                         goto cleanup_queue;
+
+                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+                if (ret)
+                        goto cleanup_queue;
         }

         sema_init(&mq->thread_sem, 1);
@@ -239,6 +262,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
  free_bounce_sg:
         kfree(mqrq_cur->bounce_sg);
         mqrq_cur->bounce_sg = NULL;
+        kfree(mqrq_prev->bounce_sg);
+        mqrq_prev->bounce_sg = NULL;

  cleanup_queue:
         kfree(mqrq_cur->sg);
@@ -246,6 +271,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         kfree(mqrq_cur->bounce_buf);
         mqrq_cur->bounce_buf = NULL;

+        kfree(mqrq_prev->sg);
+        mqrq_prev->sg = NULL;
+        kfree(mqrq_prev->bounce_buf);
+        mqrq_prev->bounce_buf = NULL;
+
         blk_cleanup_queue(mq->queue);
         return ret;
 }
@@ -255,6 +285,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
         struct request_queue *q = mq->queue;
         unsigned long flags;
         struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

         /* Make sure the queue isn't suspended, as that will deadlock */
         mmc_queue_resume(mq);
@@ -277,6 +308,15 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
         kfree(mqrq_cur->bounce_buf);
         mqrq_cur->bounce_buf = NULL;

+        kfree(mqrq_prev->bounce_sg);
+        mqrq_prev->bounce_sg = NULL;
+
+        kfree(mqrq_prev->sg);
+        mqrq_prev->sg = NULL;
+
+        kfree(mqrq_prev->bounce_buf);
+        mqrq_prev->bounce_buf = NULL;
+
         mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index c1a69ac6fff0..1a637d2e2ca6 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -29,8 +29,9 @@ struct mmc_queue {
         int                     (*issue_fn)(struct mmc_queue *, struct request *);
         void                    *data;
         struct request_queue    *queue;
-        struct mmc_queue_req    mqrq[1];
+        struct mmc_queue_req    mqrq[2];
         struct mmc_queue_req    *mqrq_cur;
+        struct mmc_queue_req    *mqrq_prev;
 };

extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,