Diffstat (limited to 'drivers/mmc/core/block.c')
-rw-r--r-- | drivers/mmc/core/block.c | 130
1 file changed, 70 insertions(+), 60 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 663d87924e5e..7896952de1ac 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -70,7 +70,6 @@ MODULE_ALIAS("mmc:block");
  * ample.
  */
 #define MMC_BLK_TIMEOUT_MS  (10 * 1000)
-#define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
 
@@ -168,6 +167,11 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      unsigned int part_type);
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq);
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
 
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
@@ -408,44 +412,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
 	return 0;
 }
 
-static int ioctl_do_sanitize(struct mmc_card *card)
-{
-	int err;
-
-	if (!mmc_can_sanitize(card)) {
-		pr_warn("%s: %s - SANITIZE is not supported\n",
-			mmc_hostname(card->host), __func__);
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
-		 mmc_hostname(card->host), __func__);
-
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			 EXT_CSD_SANITIZE_START, 1,
-			 MMC_SANITIZE_REQ_TIMEOUT);
-
-	if (err)
-		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
-		       mmc_hostname(card->host), __func__, err);
-
-	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
-		 __func__);
-out:
-	return err;
-}
-
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
-	/*
-	 * Some cards mishandle the status bits, so make sure to check both the
-	 * busy indication and the card state.
-	 */
-	return status & R1_READY_FOR_DATA &&
-	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 			    u32 *resp_errs)
 {
@@ -477,13 +443,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 				__func__, status);
 			return -ETIMEDOUT;
 		}
-
-		/*
-		 * Some cards mishandle the status bits,
-		 * so make sure to check both the busy
-		 * indication and the card state.
-		 */
-	} while (!mmc_blk_in_tran_state(status));
+	} while (!mmc_ready_for_data(status));
 
 	return err;
 }
@@ -580,15 +540,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 	}
 
 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
-	    (cmd.opcode == MMC_SWITCH)) {
-		err = ioctl_do_sanitize(card);
-
-		if (err)
-			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
-			       __func__, err);
-
-		return err;
-	}
+	    (cmd.opcode == MMC_SWITCH))
+		return mmc_sanitize(card);
 
 	mmc_wait_for_req(card->host, &mrq);
 
@@ -1417,6 +1370,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_request *mrq = &mqrq->brq.mrq;
 	struct request_queue *q = req->q;
 	struct mmc_host *host = mq->card->host;
+	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
 	unsigned long flags;
 	bool put_card;
 	int err;
@@ -1446,7 +1400,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 
 	spin_lock_irqsave(&mq->lock, flags);
 
-	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+	mq->in_flight[issue_type] -= 1;
 
 	put_card = (mmc_tot_in_flight(mq) == 0);
 
@@ -1532,9 +1486,30 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
 	return mmc_blk_cqe_start_req(mq->card->host, mrq);
 }
 
+static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_host *host = mq->card->host;
+	int err;
+
+	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
+	mmc_pre_req(host, &mqrq->brq.mrq);
+
+	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
+	if (err)
+		mmc_post_req(host, &mqrq->brq.mrq, err);
+
+	return err;
+}
+
 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_host *host = mq->card->host;
+
+	if (host->hsq_enabled)
+		return mmc_blk_hsq_issue_rw_rq(mq, req);
 
 	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
 
@@ -1666,7 +1641,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 			goto error_exit;
 
 		if (!mmc_host_is_spi(host) &&
-		    !mmc_blk_in_tran_state(status)) {
+		    !mmc_ready_for_data(status)) {
 			err = mmc_blk_fix_state(card, req);
 			if (err)
 				goto error_exit;
@@ -1726,7 +1701,7 @@ static bool mmc_blk_status_error(struct request *req, u32 status)
 	return brq->cmd.resp[0] & CMD_ERRORS ||
 	       brq->stop.resp[0] & stop_err_bits ||
 	       status & stop_err_bits ||
-	       (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
 }
 
 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
@@ -1788,7 +1763,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
 
 	/* Try to get back to "tran" state */
 	if (!mmc_host_is_spi(mq->card->host) &&
-	    (err || !mmc_blk_in_tran_state(status)))
+	    (err || !mmc_ready_for_data(status)))
 		err = mmc_blk_fix_state(mq->card, req);
 
 	/*
@@ -1920,6 +1895,41 @@ static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
 		mmc_run_bkops(mq->card);
 }
 
+static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
+{
+	struct mmc_queue_req *mqrq =
+		container_of(mrq, struct mmc_queue_req, brq.mrq);
+	struct request *req = mmc_queue_req_to_req(mqrq);
+	struct request_queue *q = req->q;
+	struct mmc_queue *mq = q->queuedata;
+	struct mmc_host *host = mq->card->host;
+	unsigned long flags;
+
+	if (mmc_blk_rq_error(&mqrq->brq) ||
+	    mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+		spin_lock_irqsave(&mq->lock, flags);
+		mq->recovery_needed = true;
+		mq->recovery_req = req;
+		spin_unlock_irqrestore(&mq->lock, flags);
+
+		host->cqe_ops->cqe_recovery_start(host);
+
+		schedule_work(&mq->recovery_work);
+		return;
+	}
+
+	mmc_blk_rw_reset_success(mq, req);
+
+	/*
+	 * Block layer timeouts race with completions which means the normal
+	 * completion path cannot be used during recovery.
+	 */
+	if (mq->in_recovery)
+		mmc_blk_cqe_complete_rq(mq, req);
+	else
+		blk_mq_complete_request(req);
+}
+
 void mmc_blk_mq_complete(struct request *req)
 {
 	struct mmc_queue *mq = req->q->queuedata;
@@ -2474,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
 	struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
 						  struct mmc_rpmb_data, chrdev);
 
-	put_device(&rpmb->dev);
 	mmc_blk_put(rpmb->md);
+	put_device(&rpmb->dev);
 
 	return 0;
 }
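Note: the mmc_ready_for_data() helper that replaces the driver-local
mmc_blk_in_tran_state() above is not part of this diff; it is a core helper
(expected in include/linux/mmc/mmc.h as of this series) whose logic should
match the removed code. A sketch for reference, under that assumption:

	/* Equivalent of the removed mmc_blk_in_tran_state(), shared with core. */
	static inline bool mmc_ready_for_data(u32 status)
	{
		/*
		 * Some cards mishandle the status bits, so make sure to check
		 * both the busy indication and the card state.
		 */
		return status & R1_READY_FOR_DATA &&
		       R1_CURRENT_STATE(status) == R1_STATE_TRAN;
	}

Hoisting the helper out of block.c lets card_busy_detect() and other core
polling paths share one definition instead of duplicating the comment and
the two-part status check at every call site.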