Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r-- | drivers/mmc/card/queue.c | 316
1 file changed, 125 insertions, 191 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 8037f73a109a..6ae6bfb8b221 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -44,7 +44,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
 		return BLKPREP_KILL;
 
-	req->cmd_flags |= REQ_DONTPREP;
+	req->rq_flags |= RQF_DONTPREP;
 
 	return BLKPREP_OK;
 }
@@ -53,6 +53,7 @@ static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
+	struct mmc_context_info *cntx = &mq->card->host->context_info;
 
 	current->flags |= PF_MEMALLOC;
 
@@ -63,6 +64,19 @@ static int mmc_queue_thread(void *d)
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
+		mq->asleep = false;
+		cntx->is_waiting_last_req = false;
+		cntx->is_new_req = false;
+		if (!req) {
+			/*
+			 * Dispatch queue is empty so set flags for
+			 * mmc_request_fn() to wake us up.
+			 */
+			if (mq->mqrq_prev->req)
+				cntx->is_waiting_last_req = true;
+			else
+				mq->asleep = true;
+		}
 		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
@@ -115,31 +129,24 @@ static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
-	unsigned long flags;
 	struct mmc_context_info *cntx;
 
 	if (!mq) {
 		while ((req = blk_fetch_request(q)) != NULL) {
-			req->cmd_flags |= REQ_QUIET;
+			req->rq_flags |= RQF_QUIET;
 			__blk_end_request_all(req, -EIO);
 		}
 		return;
 	}
 
 	cntx = &mq->card->host->context_info;
-	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
-		/*
-		 * New MMC request arrived when MMC thread may be
-		 * blocked on the previous request to be complete
-		 * with no current request fetched
-		 */
-		spin_lock_irqsave(&cntx->lock, flags);
-		if (cntx->is_waiting_last_req) {
-			cntx->is_new_req = true;
-			wake_up_interruptible(&cntx->wait);
-		}
-		spin_unlock_irqrestore(&cntx->lock, flags);
-	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+
+	if (cntx->is_waiting_last_req) {
+		cntx->is_new_req = true;
+		wake_up_interruptible(&cntx->wait);
+	}
+
+	if (mq->asleep)
 		wake_up_process(mq->thread);
 }
 
@@ -179,6 +186,82 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
+					unsigned int bouncesz)
+{
+	int i;
+
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mq->mqrq[i].bounce_buf)
+			goto out_err;
+	}
+
+	return true;
+
+out_err:
+	while (--i >= 0) {
+		kfree(mq->mqrq[i].bounce_buf);
+		mq->mqrq[i].bounce_buf = NULL;
+	}
+	pr_warn("%s: unable to allocate bounce buffers\n",
+		mmc_card_name(mq->card));
+	return false;
+}
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+				      unsigned int bouncesz)
+{
+	int i, ret;
+
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+		if (ret)
+			return ret;
+
+		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+{
+	int i, ret;
+
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+	int i;
+
+	for (i = 0; i < mq->qdepth; i++)
+		mmc_queue_req_free_bufs(&mq->mqrq[i]);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
@@ -193,9 +276,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
-	int ret;
-	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+	bool bounce = false;
+	int ret = -ENOMEM;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -205,8 +287,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->mqrq_cur = mqrq_cur;
-	mq->mqrq_prev = mqrq_prev;
+	mq->qdepth = 2;
+	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+			   GFP_KERNEL);
+	if (!mq->mqrq)
+		goto blk_cleanup;
+	mq->mqrq_cur = &mq->mqrq[0];
+	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -228,63 +315,29 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		if (bouncesz > (host->max_blk_count * 512))
 			bouncesz = host->max_blk_count * 512;
 
-		if (bouncesz > 512) {
-			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-			if (!mqrq_cur->bounce_buf) {
-				pr_warn("%s: unable to allocate bounce cur buffer\n",
-					mmc_card_name(card));
-			} else {
-				mqrq_prev->bounce_buf =
-						kmalloc(bouncesz, GFP_KERNEL);
-				if (!mqrq_prev->bounce_buf) {
-					pr_warn("%s: unable to allocate bounce prev buffer\n",
-						mmc_card_name(card));
-					kfree(mqrq_cur->bounce_buf);
-					mqrq_cur->bounce_buf = NULL;
-				}
-			}
-		}
-
-		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+		if (bouncesz > 512 &&
+		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);
 
-			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_cur->bounce_sg =
-				mmc_alloc_sg(bouncesz / 512, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_prev->bounce_sg =
-				mmc_alloc_sg(bouncesz / 512, &ret);
+			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
 			if (ret)
 				goto cleanup_queue;
+			bounce = true;
 		}
 	}
 #endif
 
-	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+	if (!bounce) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
-		if (ret)
-			goto cleanup_queue;
-
-
-		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
 		if (ret)
 			goto cleanup_queue;
 	}
@@ -296,27 +349,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
-		goto free_bounce_sg;
+		goto cleanup_queue;
 	}
 
 	return 0;
- free_bounce_sg:
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
+
cleanup_queue:
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
-
+	mmc_queue_reqs_free_bufs(mq);
+	kfree(mq->mqrq);
+	mq->mqrq = NULL;
+blk_cleanup:
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -325,8 +367,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
-	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
-	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -340,71 +380,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	kfree(mqrq_cur->bounce_sg);
-	mqrq_cur->bounce_sg = NULL;
-
-	kfree(mqrq_cur->sg);
-	mqrq_cur->sg = NULL;
-
-	kfree(mqrq_cur->bounce_buf);
-	mqrq_cur->bounce_buf = NULL;
-
-	kfree(mqrq_prev->bounce_sg);
-	mqrq_prev->bounce_sg = NULL;
-
-	kfree(mqrq_prev->sg);
-	mqrq_prev->sg = NULL;
-
-	kfree(mqrq_prev->bounce_buf);
-	mqrq_prev->bounce_buf = NULL;
+	mmc_queue_reqs_free_bufs(mq);
+	kfree(mq->mqrq);
+	mq->mqrq = NULL;
 
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
-int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
-{
-	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
-	int ret = 0;
-
-
-	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
-	if (!mqrq_cur->packed) {
-		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
-			mmc_card_name(card));
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
-	if (!mqrq_prev->packed) {
-		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
-			mmc_card_name(card));
-		kfree(mqrq_cur->packed);
-		mqrq_cur->packed = NULL;
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	INIT_LIST_HEAD(&mqrq_cur->packed->list);
-	INIT_LIST_HEAD(&mqrq_prev->packed->list);
-
-out:
-	return ret;
-}
-
-void mmc_packed_clean(struct mmc_queue *mq)
-{
-	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
-	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
-
-	kfree(mqrq_cur->packed);
-	mqrq_cur->packed = NULL;
-	kfree(mqrq_prev->packed);
-	mqrq_prev->packed = NULL;
-}
-
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -449,41 +432,6 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
-static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
-					    struct mmc_packed *packed,
-					    struct scatterlist *sg,
-					    enum mmc_packed_type cmd_type)
-{
-	struct scatterlist *__sg = sg;
-	unsigned int sg_len = 0;
-	struct request *req;
-
-	if (mmc_packed_wr(cmd_type)) {
-		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
-		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
-		unsigned int len, remain, offset = 0;
-		u8 *buf = (u8 *)packed->cmd_hdr;
-
-		remain = hdr_sz;
-		do {
-			len = min(remain, max_seg_sz);
-			sg_set_buf(__sg, buf + offset, len);
-			offset += len;
-			remain -= len;
-			sg_unmark_end(__sg++);
-			sg_len++;
-		} while (remain);
-	}
-
-	list_for_each_entry(req, &packed->list, queuelist) {
-		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
-		__sg = sg + (sg_len - 1);
-		sg_unmark_end(__sg++);
-	}
-	sg_mark_end(sg + (sg_len - 1));
-	return sg_len;
-}
-
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -492,26 +440,12 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
-	enum mmc_packed_type cmd_type;
 	int i;
 
-	cmd_type = mqrq->cmd_type;
-
-	if (!mqrq->bounce_buf) {
-		if (mmc_packed_cmd(cmd_type))
-			return mmc_queue_packed_map_sg(mq, mqrq->packed,
-						       mqrq->sg, cmd_type);
-		else
-			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
-	}
-
-	BUG_ON(!mqrq->bounce_sg);
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
 
-	if (mmc_packed_cmd(cmd_type))
-		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
-						 mqrq->bounce_sg, cmd_type);
-	else
-		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
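
Note on the wake-up rework: the diff replaces mmc_request_fn()'s inspection of mqrq_cur/mqrq_prev (done under cntx->lock) with flags that the queue thread itself publishes under q->queue_lock before it blocks: cntx->is_waiting_last_req and mq->asleep. The userspace pthread sketch below illustrates the lost-wakeup-free handshake with a single asleep flag standing in for both; every name here (queue_thread, request_fn, pending) is an illustrative stand-in, not kernel API, and the real patch uses a kthread plus two different wait channels rather than one condition variable.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool asleep;	/* set by the consumer before it blocks */
static bool done;
static int pending;	/* stand-in for the block-layer dispatch queue */

static void *queue_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&queue_lock);
	while (!done || pending) {
		asleep = false;
		if (pending) {
			pending--;	/* "fetch" one request and handle it */
			printf("queue thread: handled a request, %d left\n",
			       pending);
			continue;
		}
		asleep = true;	/* publish: wake me when work arrives */
		pthread_cond_wait(&wake, &queue_lock);
	}
	pthread_mutex_unlock(&queue_lock);
	return NULL;
}

static void request_fn(void)
{
	pthread_mutex_lock(&queue_lock);
	pending++;
	if (asleep)	/* wake the thread only if it said it needs it */
		pthread_cond_signal(&wake);
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, queue_thread, NULL);
	for (int i = 0; i < 3; i++)
		request_fn();

	pthread_mutex_lock(&queue_lock);
	done = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&queue_lock);
	pthread_join(t, NULL);
	return 0;
}

Because asleep is only ever set under the same lock that request_fn() takes before testing it, a wakeup cannot slip in between the flag being published and the thread actually blocking; this is the same property the patch gets by setting mq->asleep and cntx->is_waiting_last_req under q->queue_lock, which in turn lets it drop cntx->lock from this path.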
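On the allocation side, the diff replaces the open-coded mqrq_cur/mqrq_prev pairs with an mq->mqrq array sized by mq->qdepth, allocated all-or-nothing with a goto-based unwind, and freed by one helper. Below is a minimal userspace sketch of that pattern, assuming plain malloc/calloc in place of kmalloc/kcalloc; the toy_* names are hypothetical.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_req {
	void *bounce_buf;	/* one bounce buffer per queue slot */
};

struct toy_queue {
	struct toy_req *mqrq;	/* array of per-slot request state */
	int qdepth;		/* number of slots; the patch uses 2 */
};

static bool toy_alloc_bounce_bufs(struct toy_queue *mq, size_t bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = malloc(bouncesz);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}
	return true;

out_err:
	/* unwind only the slots that were already allocated */
	while (--i >= 0) {
		free(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	return false;
}

static void toy_free_bufs(struct toy_queue *mq)
{
	/* one loop frees every slot, like mmc_queue_reqs_free_bufs() */
	for (int i = 0; i < mq->qdepth; i++) {
		free(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
}

int main(void)
{
	struct toy_queue mq = { .qdepth = 2 };

	mq.mqrq = calloc(mq.qdepth, sizeof(*mq.mqrq));
	if (!mq.mqrq)
		return 1;

	if (toy_alloc_bounce_bufs(&mq, 64 * 1024)) {
		printf("allocated %d bounce buffers\n", mq.qdepth);
		toy_free_bufs(&mq);
	}

	free(mq.mqrq);
	return 0;
}

The payoff mirrored from the patch: the cur/prev-specific error paths collapse into loops over qdepth (compare the removed free_bounce_sg label with the new single cleanup_queue path), and raising the queue depth later only means changing one number.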