Diffstat (limited to 'drivers/mmc/core')
-rw-r--r--  drivers/mmc/core/Kconfig        |  18
-rw-r--r--  drivers/mmc/core/block.c        | 310
-rw-r--r--  drivers/mmc/core/core.c         | 214
-rw-r--r--  drivers/mmc/core/host.c         |  74
-rw-r--r--  drivers/mmc/core/mmc.c          |  18
-rw-r--r--  drivers/mmc/core/mmc_ops.c      | 260
-rw-r--r--  drivers/mmc/core/mmc_ops.h      |   5
-rw-r--r--  drivers/mmc/core/mmc_test.c     |   2
-rw-r--r--  drivers/mmc/core/pwrseq.c       |   8
-rw-r--r--  drivers/mmc/core/pwrseq.h       |   3
-rw-r--r--  drivers/mmc/core/pwrseq_emmc.c  |   2
-rw-r--r--  drivers/mmc/core/queue.c        | 245
-rw-r--r--  drivers/mmc/core/queue.h        |  47
-rw-r--r--  drivers/mmc/core/sd.c           |  22
-rw-r--r--  drivers/mmc/core/sdio.c         |  24
-rw-r--r--  drivers/mmc/core/sdio_irq.c     |  22
-rw-r--r--  drivers/mmc/core/sdio_ops.h     |   2
-rw-r--r--  drivers/mmc/core/slot-gpio.c    |   2
18 files changed, 616 insertions(+), 662 deletions(-)
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index fc1ecdaaa9ca..42e89060cd41 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -61,24 +61,6 @@ config MMC_BLOCK_MINORS
If unsure, say 8 here.
-config MMC_BLOCK_BOUNCE
- bool "Use bounce buffer for simple hosts"
- depends on MMC_BLOCK
- default y
- help
- SD/MMC is a high latency protocol where it is crucial to
- send large requests in order to get high performance. Many
- controllers, however, are restricted to continuous memory
- (i.e. they can't do scatter-gather), something the kernel
- rarely can provide.
-
- Say Y here to help these restricted hosts by bouncing
- requests back and forth from a large buffer. You will get
- a big performance gain at the cost of up to 64 KiB of
- physical memory.
-
- If unsure, say Y here.
-
config SDIO_UART
tristate "SDIO UART/GPS class support"
depends on TTY
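The help text removed above describes the scheme itself: hosts that can only DMA to a single contiguous segment have request data copied ("bounced") between the request's scattered pages and one physically contiguous buffer. A minimal sketch of that copy step using the kernel's scatterlist helpers (illustrative only, not code from this patch):

    #include <linux/scatterlist.h>

    /* Before a write: gather the request's scattered pages into the buffer. */
    static void bounce_pre_write(struct scatterlist *sg, unsigned int sg_len,
                                 void *bounce_buf, size_t len)
    {
            sg_copy_to_buffer(sg, sg_len, bounce_buf, len);
    }

    /* After a read: scatter the buffer contents back out to the pages. */
    static void bounce_post_read(struct scatterlist *sg, unsigned int sg_len,
                                 void *bounce_buf, size_t len)
    {
            sg_copy_from_buffer(sg, sg_len, bounce_buf, len);
    }

Note that bouncing itself survives this patch; only the config knob goes away. The buffers are now allocated per request in queue.c (see mmc_init_request() below), and hosts can opt out via MMC_CAP_NO_BOUNCE_BUFF.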
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8273b078686d..8ac59dc80f23 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -127,14 +127,6 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
-static int get_card_status(struct mmc_card *card, u32 *status, int retries);
-
-static void mmc_blk_requeue(struct request_queue *q, struct request *req)
-{
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(q, req);
- spin_unlock_irq(q->queue_lock);
-}
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -197,6 +189,8 @@ static ssize_t power_ro_lock_store(struct device *dev,
int ret;
struct mmc_blk_data *md, *part_md;
struct mmc_card *card;
+ struct mmc_queue *mq;
+ struct request *req;
unsigned long set;
if (kstrtoul(buf, 0, &set))
@@ -206,20 +200,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
return count;
md = mmc_blk_get(dev_to_disk(dev));
+ mq = &md->queue;
card = md->queue.card;
- mmc_get_card(card);
-
- ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
- card->ext_csd.boot_ro_lock |
- EXT_CSD_BOOT_WP_B_PWR_WP_EN,
- card->ext_csd.part_time);
- if (ret)
- pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
- else
- card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
-
- mmc_put_card(card);
+ /* Dispatch locking to the block layer */
+ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ret = req_to_mmc_queue_req(req)->drv_op_result;
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -392,7 +380,7 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
return -EINVAL;
do {
- err = get_card_status(card, status, 5);
+ err = __mmc_send_status(card, status, 5);
if (err)
break;
@@ -450,7 +438,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {};
struct scatterlist sg;
int err;
- int is_rpmb = false;
+ bool is_rpmb = false;
u32 status = 0;
if (!card || !md || !idata)
@@ -570,9 +558,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
{
struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_ioc_data *idatas[1];
struct mmc_blk_data *md;
+ struct mmc_queue *mq;
struct mmc_card *card;
int err = 0, ioc_err = 0;
+ struct request *req;
/*
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -598,17 +589,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_done;
}
- mmc_get_card(card);
-
- ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
-
- /* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
- mmc_put_card(card);
-
+ /*
+ * Dispatch the ioctl() into the block request queue.
+ */
+ mq = &md->queue;
+ req = blk_get_request(mq->queue,
+ idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ __GFP_RECLAIM);
+ idatas[0] = idata;
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->idata = idatas;
+ req_to_mmc_queue_req(req)->ioc_count = 1;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+ blk_put_request(req);
cmd_done:
mmc_blk_put(md);
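The hunk above replaces direct host claiming with a round trip through the block layer: allocate a driver-private request, describe the operation in the request's per-request data (PDU), run it synchronously with blk_execute_rq(), then read the result back out of the PDU. Condensed into a hypothetical helper (the helper name and the IS_ERR check are this sketch's additions):

    #include <linux/blkdev.h>

    static int mmc_dispatch_one_ioctl(struct mmc_queue *mq,
                                      struct mmc_blk_ioc_data *idata)
    {
            struct request *req;
            int ret;

            req = blk_get_request(mq->queue,
                                  idata->ic.write_flag ?
                                  REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
                                  __GFP_RECLAIM);
            if (IS_ERR(req))
                    return PTR_ERR(req);

            req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
            /* a stack "array" of one; safe since the call below blocks */
            req_to_mmc_queue_req(req)->idata = &idata;
            req_to_mmc_queue_req(req)->ioc_count = 1;

            /* Runs mmc_blk_issue_drv_op() from the queue thread and waits */
            blk_execute_rq(mq->queue, NULL, req, 0);
            ret = req_to_mmc_queue_req(req)->drv_op_result;

            blk_put_request(req);
            return ret;
    }

Because the operation travels through the same queue as ordinary reads and writes, it is serialized against in-flight I/O for free, which is why the explicit mmc_get_card()/mmc_put_card() bracketing can be dropped.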
@@ -625,8 +620,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *cmds = user->cmds;
struct mmc_card *card;
struct mmc_blk_data *md;
+ struct mmc_queue *mq;
int i, err = 0, ioc_err = 0;
__u64 num_of_cmds;
+ struct request *req;
/*
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -640,6 +637,9 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
sizeof(num_of_cmds)))
return -EFAULT;
+ if (!num_of_cmds)
+ return 0;
+
if (num_of_cmds > MMC_IOC_MAX_CMDS)
return -EINVAL;
@@ -668,21 +668,26 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
goto cmd_done;
}
- mmc_get_card(card);
-
- for (i = 0; i < num_of_cmds && !ioc_err; i++)
- ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
- /* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
- mmc_put_card(card);
+ /*
+ * Dispatch the ioctl()s into the block request queue.
+ */
+ mq = &md->queue;
+ req = blk_get_request(mq->queue,
+ idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ __GFP_RECLAIM);
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->idata = idata;
+ req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
for (i = 0; i < num_of_cmds && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
+ blk_put_request(req);
+
cmd_done:
mmc_blk_put(md);
cmd_err:
@@ -852,21 +857,6 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
return 0;
}
-static int get_card_status(struct mmc_card *card, u32 *status, int retries)
-{
- struct mmc_command cmd = {};
- int err;
-
- cmd.opcode = MMC_SEND_STATUS;
- if (!mmc_host_is_spi(card->host))
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, retries);
- if (err == 0)
- *status = cmd.resp[0];
- return err;
-}
-
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
bool hw_busy_detect, struct request *req, bool *gen_err)
{
@@ -875,7 +865,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
u32 status;
do {
- err = get_card_status(card, &status, 5);
+ err = __mmc_send_status(card, &status, 5);
if (err) {
pr_err("%s: error %d requesting status\n",
req->rq_disk->disk_name, err);
@@ -1043,7 +1033,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
* we can't be sure the returned status is for the r/w command.
*/
for (retry = 2; retry >= 0; retry--) {
- err = get_card_status(card, &status, 0);
+ err = __mmc_send_status(card, &status, 0);
if (!err)
break;
@@ -1178,15 +1168,64 @@ int mmc_access_rpmb(struct mmc_queue *mq)
return false;
}
+/*
+ * Non-block commands are queued and processed by the block layer along
+ * with all other requests, and are then issued to the card in this
+ * function.
+ */
+static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mq_rq;
+ struct mmc_card *card = mq->card;
+ struct mmc_blk_data *md = mq->blkdata;
+ int ret;
+ int i;
+
+ mq_rq = req_to_mmc_queue_req(req);
+
+ switch (mq_rq->drv_op) {
+ case MMC_DRV_OP_IOCTL:
+ for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
+ ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]);
+ if (ret)
+ break;
+ }
+ /* Always switch back to main area after RPMB access */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+ break;
+ case MMC_DRV_OP_BOOT_WP:
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+ card->ext_csd.boot_ro_lock |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
+ md->disk->disk_name, ret);
+ else
+ card->ext_csd.boot_ro_lock |=
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+ break;
+ default:
+ pr_err("%s: unknown driver specific operation\n",
+ md->disk->disk_name);
+ ret = -EINVAL;
+ break;
+ }
+ mq_rq->drv_op_result = ret;
+ blk_end_request_all(req, ret);
+}
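The other half of the round trip is worth spelling out: blk_execute_rq() on the submitting side sleeps until the request completes, so storing the status in the PDU before completing the request is what hands the result back. A stripped-down sketch of that contract (hypothetical name; the function above passes the raw errno to blk_end_request_all(), and mapping it to a blk_status_t is this sketch's assumption):

    static void complete_drv_op(struct request *req, int result)
    {
            /* Publish the result where the submitter will look for it */
            req_to_mmc_queue_req(req)->drv_op_result = result;
            /* Completing the request wakes the blk_execute_rq() caller */
            blk_end_request_all(req, result ? BLK_STS_IOERR : BLK_STS_OK);
    }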
+
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_DISCARD;
+ blk_status_t status = BLK_STS_OK;
if (!mmc_can_erase(card)) {
- err = -EOPNOTSUPP;
+ status = BLK_STS_NOTSUPP;
goto fail;
}
@@ -1212,10 +1251,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
if (!err)
err = mmc_erase(card, from, nr, arg);
} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
- if (!err)
+ if (err)
+ status = BLK_STS_IOERR;
+ else
mmc_blk_reset_success(md, type);
fail:
- blk_end_request(req, err, blk_rq_bytes(req));
+ blk_end_request(req, status, blk_rq_bytes(req));
}
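The conversions in this function follow the mechanical rule of the blk_status_t migration: completion functions no longer take a negative errno but a block-layer status code, so -EOPNOTSUPP becomes BLK_STS_NOTSUPP and every other failure collapses to BLK_STS_IOERR. The same mapping as a small hypothetical helper (the block core also provides errno_to_blk_status() for the general case):

    #include <linux/blk_types.h>
    #include <linux/errno.h>

    /* errno -> blk_status_t, as open-coded in the discard/trim paths */
    static blk_status_t mmc_errno_to_blk_status(int err)
    {
            if (!err)
                    return BLK_STS_OK;
            if (err == -EOPNOTSUPP)
                    return BLK_STS_NOTSUPP;
            return BLK_STS_IOERR;
    }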
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1225,9 +1266,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
+ blk_status_t status = BLK_STS_OK;
if (!(mmc_can_secure_erase_trim(card))) {
- err = -EOPNOTSUPP;
+ status = BLK_STS_NOTSUPP;
goto out;
}
@@ -1254,8 +1296,10 @@ retry:
err = mmc_erase(card, from, nr, arg);
if (err == -EIO)
goto out_retry;
- if (err)
+ if (err) {
+ status = BLK_STS_IOERR;
goto out;
+ }
if (arg == MMC_SECURE_TRIM1_ARG) {
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
@@ -1270,8 +1314,10 @@ retry:
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
if (err == -EIO)
goto out_retry;
- if (err)
+ if (err) {
+ status = BLK_STS_IOERR;
goto out;
+ }
}
out_retry:
@@ -1280,7 +1326,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- blk_end_request(req, err, blk_rq_bytes(req));
+ blk_end_request(req, status, blk_rq_bytes(req));
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1290,10 +1336,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
int ret = 0;
ret = mmc_flush_cache(card);
- if (ret)
- ret = -EIO;
-
- blk_end_request_all(req, ret);
+ blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
@@ -1324,16 +1367,25 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
+static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
+{
+ if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
+ cmd->error = -EIO;
+
+ return cmd->error;
+}
+
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
areq);
struct mmc_blk_request *brq = &mq_mrq->brq;
- struct request *req = mq_mrq->req;
+ struct request *req = mmc_queue_req_to_req(mq_mrq);
int need_retune = card->host->need_retune;
bool ecc_err = false;
bool gen_err = false;
@@ -1348,7 +1400,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
* stop.error indicates a problem with the stop command. Data
* may have been transferred, or may still be transferring.
*/
- if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
brq->data.error) {
switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY:
@@ -1402,7 +1454,8 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_RETRY;
}
- if (brq->data.error) {
+ /* Some errors (ECC) are flagged on the next command, so check stop, too */
+ if (brq->data.error || brq->stop.error) {
if (need_retune && !brq->retune_retry_done) {
pr_debug("%s: retrying because a re-tune was needed\n",
req->rq_disk->disk_name);
@@ -1410,7 +1463,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_RETRY;
}
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->data.error,
+ req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
(unsigned)blk_rq_pos(req),
(unsigned)blk_rq_sectors(req),
brq->cmd.resp[0], brq->stop.resp[0]);
@@ -1440,7 +1493,7 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
- struct request *req = mqrq->req;
+ struct request *req = mmc_queue_req_to_req(mqrq);
/*
* Reliable writes are used to implement Forced Unit Access and
@@ -1545,7 +1598,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
{
u32 readcmd, writecmd;
struct mmc_blk_request *brq = &mqrq->brq;
- struct request *req = mqrq->req;
+ struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
@@ -1641,8 +1694,8 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
{
if (mmc_card_removed(card))
req->rq_flags |= RQF_QUIET;
- while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
- mmc_queue_req_free(mq, mqrq);
+ while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
+ mq->qcnt--;
}
/**
@@ -1661,8 +1714,8 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
*/
if (mmc_card_removed(mq->card)) {
req->rq_flags |= RQF_QUIET;
- blk_end_request_all(req, -EIO);
- mmc_queue_req_free(mq, mqrq);
+ blk_end_request_all(req, BLK_STS_IOERR);
+ mq->qcnt--; /* FIXME: just set to 0? */
return;
}
/* Else proceed and try to restart the current async request */
@@ -1685,12 +1738,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
bool req_pending = true;
if (new_req) {
- mqrq_cur = mmc_queue_req_find(mq, new_req);
- if (!mqrq_cur) {
- WARN_ON(1);
- mmc_blk_requeue(mq->queue, new_req);
- new_req = NULL;
- }
+ mqrq_cur = req_to_mmc_queue_req(new_req);
+ mq->qcnt++;
}
if (!mq->qcnt)
@@ -1731,7 +1780,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
*/
mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
brq = &mq_rq->brq;
- old_req = mq_rq->req;
+ old_req = mmc_queue_req_to_req(mq_rq);
type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
@@ -1743,7 +1792,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
*/
mmc_blk_reset_success(md, type);
- req_pending = blk_end_request(old_req, 0,
+ req_pending = blk_end_request(old_req, BLK_STS_OK,
brq->data.bytes_xfered);
/*
* If the blk_end_request function returns non-zero even
@@ -1764,12 +1813,12 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
if (req_pending)
mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
else
- mmc_queue_req_free(mq, mq_rq);
+ mq->qcnt--;
mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
return;
}
if (!req_pending) {
- mmc_queue_req_free(mq, mq_rq);
+ mq->qcnt--;
mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
return;
}
@@ -1811,10 +1860,10 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
* time, so we only reach here after trying to
* read a single sector.
*/
- req_pending = blk_end_request(old_req, -EIO,
+ req_pending = blk_end_request(old_req, BLK_STS_IOERR,
brq->data.blksz);
if (!req_pending) {
- mmc_queue_req_free(mq, mq_rq);
+ mq->qcnt--;
mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
return;
}
@@ -1844,7 +1893,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
}
} while (req_pending);
- mmc_queue_req_free(mq, mq_rq);
+ mq->qcnt--;
}
void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
@@ -1860,28 +1909,59 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_part_switch(card, md);
if (ret) {
if (req) {
- blk_end_request_all(req, -EIO);
+ blk_end_request_all(req, BLK_STS_IOERR);
}
goto out;
}
- if (req && req_op(req) == REQ_OP_DISCARD) {
- /* complete ongoing async transfer before issuing discard */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
- mmc_blk_issue_discard_rq(mq, req);
- } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
- /* complete ongoing async transfer before issuing secure erase*/
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
- mmc_blk_issue_secdiscard_rq(mq, req);
- } else if (req && req_op(req) == REQ_OP_FLUSH) {
- /* complete ongoing async transfer before issuing flush */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
- mmc_blk_issue_flush(mq, req);
+ if (req) {
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ /*
+ * Complete ongoing async transfer before issuing
+ * ioctl()s
+ */
+ if (mq->qcnt)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ mmc_blk_issue_drv_op(mq, req);
+ break;
+ case REQ_OP_DISCARD:
+ /*
+ * Complete ongoing async transfer before issuing
+ * discard.
+ */
+ if (mq->qcnt)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ mmc_blk_issue_discard_rq(mq, req);
+ break;
+ case REQ_OP_SECURE_ERASE:
+ /*
+ * Complete ongoing async transfer before issuing
+ * secure erase.
+ */
+ if (mq->qcnt)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ mmc_blk_issue_secdiscard_rq(mq, req);
+ break;
+ case REQ_OP_FLUSH:
+ /*
+ * Complete ongoing async transfer before issuing
+ * flush.
+ */
+ if (mq->qcnt)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ mmc_blk_issue_flush(mq, req);
+ break;
+ default:
+ /* Normal request, just issue it */
+ mmc_blk_issue_rw_rq(mq, req);
+ card->host->context_info.is_waiting_last_req = false;
+ break;
+ }
} else {
- mmc_blk_issue_rw_rq(mq, req);
+ /* No request, flushing the pipeline with NULL */
+ mmc_blk_issue_rw_rq(mq, NULL);
card->host->context_info.is_waiting_last_req = false;
}
@@ -2090,6 +2170,7 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
+ blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
@@ -2166,7 +2247,6 @@ static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
char cap_str[10];
- int ret;
/*
* Check that the card supports the command class(es) we need.
@@ -2176,15 +2256,9 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_fixup_device(card, mmc_blk_fixups);
- ret = mmc_queue_alloc_shared_queue(card);
- if (ret)
- return ret;
-
md = mmc_blk_alloc(card);
- if (IS_ERR(md)) {
- mmc_queue_free_shared_queue(card);
+ if (IS_ERR(md))
return PTR_ERR(md);
- }
string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
@@ -2222,7 +2296,6 @@ static int mmc_blk_probe(struct mmc_card *card)
out:
mmc_blk_remove_parts(card, md);
mmc_blk_remove_req(md);
- mmc_queue_free_shared_queue(card);
return 0;
}
@@ -2240,7 +2313,6 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
- mmc_queue_free_shared_queue(card);
}
static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 82c45ddfa202..26431267a3e2 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -53,12 +53,6 @@
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
-/*
- * Background operations can take a long time, depending on the housekeeping
- * operations the card has to perform.
- */
-#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
-
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
@@ -362,74 +356,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
-/**
- * mmc_start_bkops - start BKOPS for supported cards
- * @card: MMC card to start BKOPS
- * @form_exception: A flag to indicate if this function was
- * called due to an exception raised by the card
- *
- * Start background operations whenever requested.
- * When the urgent BKOPS bit is set in a R1 command response
- * then background operations should be started immediately.
-*/
-void mmc_start_bkops(struct mmc_card *card, bool from_exception)
-{
- int err;
- int timeout;
- bool use_busy_signal;
-
- if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
- return;
-
- err = mmc_read_bkops_status(card);
- if (err) {
- pr_err("%s: Failed to read bkops status: %d\n",
- mmc_hostname(card->host), err);
- return;
- }
-
- if (!card->ext_csd.raw_bkops_status)
- return;
-
- if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
- from_exception)
- return;
-
- mmc_claim_host(card->host);
- if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
- timeout = MMC_BKOPS_MAX_TIMEOUT;
- use_busy_signal = true;
- } else {
- timeout = 0;
- use_busy_signal = false;
- }
-
- mmc_retune_hold(card->host);
-
- err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout, 0,
- use_busy_signal, true, false);
- if (err) {
- pr_warn("%s: Error %d starting bkops\n",
- mmc_hostname(card->host), err);
- mmc_retune_release(card->host);
- goto out;
- }
-
- /*
- * For urgent bkops status (LEVEL_2 and more)
- * bkops executed synchronously, otherwise
- * the operation is in progress
- */
- if (!use_busy_signal)
- mmc_card_set_doing_bkops(card);
- else
- mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
-}
-EXPORT_SYMBOL(mmc_start_bkops);
-
/*
* mmc_wait_data_done() - done callback for data request
* @mrq: done data request
@@ -749,71 +675,6 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
EXPORT_SYMBOL(mmc_wait_for_req);
/**
- * mmc_interrupt_hpi - Issue for High priority Interrupt
- * @card: the MMC card associated with the HPI transfer
- *
- * Issued High Priority Interrupt, and check for card status
- * until out-of prg-state.
- */
-int mmc_interrupt_hpi(struct mmc_card *card)
-{
- int err;
- u32 status;
- unsigned long prg_wait;
-
- if (!card->ext_csd.hpi_en) {
- pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
- return 1;
- }
-
- mmc_claim_host(card->host);
- err = mmc_send_status(card, &status);
- if (err) {
- pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
- goto out;
- }
-
- switch (R1_CURRENT_STATE(status)) {
- case R1_STATE_IDLE:
- case R1_STATE_READY:
- case R1_STATE_STBY:
- case R1_STATE_TRAN:
- /*
- * In idle and transfer states, HPI is not needed and the caller
- * can issue the next intended command immediately
- */
- goto out;
- case R1_STATE_PRG:
- break;
- default:
- /* In all other states, it's illegal to issue HPI */
- pr_debug("%s: HPI cannot be sent. Card state=%d\n",
- mmc_hostname(card->host), R1_CURRENT_STATE(status));
- err = -EINVAL;
- goto out;
- }
-
- err = mmc_send_hpi_cmd(card, &status);
- if (err)
- goto out;
-
- prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
- do {
- err = mmc_send_status(card, &status);
-
- if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
- break;
- if (time_after(jiffies, prg_wait))
- err = -ETIMEDOUT;
- } while (!err);
-
-out:
- mmc_release_host(card->host);
- return err;
-}
-EXPORT_SYMBOL(mmc_interrupt_hpi);
-
-/**
* mmc_wait_for_cmd - start a command and wait for completion
* @host: MMC host to start command
* @cmd: MMC command to start
@@ -843,53 +704,6 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
EXPORT_SYMBOL(mmc_wait_for_cmd);
/**
- * mmc_stop_bkops - stop ongoing BKOPS
- * @card: MMC card to check BKOPS
- *
- * Send HPI command to stop ongoing background operations to
- * allow rapid servicing of foreground operations, e.g. read/
- * writes. Wait until the card comes out of the programming state
- * to avoid errors in servicing read/write requests.
- */
-int mmc_stop_bkops(struct mmc_card *card)
-{
- int err = 0;
-
- err = mmc_interrupt_hpi(card);
-
- /*
- * If err is EINVAL, we can't issue an HPI.
- * It should complete the BKOPS.
- */
- if (!err || (err == -EINVAL)) {
- mmc_card_clr_doing_bkops(card);
- mmc_retune_release(card->host);
- err = 0;
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_stop_bkops);
-
-int mmc_read_bkops_status(struct mmc_card *card)
-{
- int err;
- u8 *ext_csd;
-
- mmc_claim_host(card->host);
- err = mmc_get_ext_csd(card, &ext_csd);
- mmc_release_host(card->host);
- if (err)
- return err;
-
- card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
- card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
- kfree(ext_csd);
- return 0;
-}
-EXPORT_SYMBOL(mmc_read_bkops_status);
-
-/**
* mmc_set_data_timeout - set the timeout for a data command
* @data: data phase for command
* @card: the MMC card associated with the data transfer
@@ -2597,6 +2411,8 @@ EXPORT_SYMBOL(mmc_set_blockcount);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
+ mmc_pwrseq_reset(host);
+
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
return;
host->ops->hw_reset(host);
@@ -2836,8 +2652,11 @@ void mmc_stop_host(struct mmc_host *host)
host->removed = 1;
spin_unlock_irqrestore(&host->lock, flags);
#endif
- if (host->slot.cd_irq >= 0)
+ if (host->slot.cd_irq >= 0) {
+ if (host->slot.cd_wake_enabled)
+ disable_irq_wake(host->slot.cd_irq);
disable_irq(host->slot.cd_irq);
+ }
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
@@ -2913,27 +2732,6 @@ int mmc_power_restore_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_power_restore_host);
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
- int err = 0;
-
- if (mmc_card_mmc(card) &&
- (card->ext_csd.cache_size > 0) &&
- (card->ext_csd.cache_ctrl & 1)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1, 0);
- if (err)
- pr_err("%s: cache flush error %d\n",
- mmc_hostname(card->host), err);
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
#ifdef CONFIG_PM_SLEEP
/* Do the card removal on suspend if card is assumed removable
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 3f8c85d5aa09..1503412f826c 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -30,6 +30,7 @@
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"
+#include "sdio_ops.h"
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
@@ -176,19 +177,17 @@ static void mmc_retune_timer(unsigned long data)
*/
int mmc_of_parse(struct mmc_host *host)
{
- struct device_node *np;
+ struct device *dev = host->parent;
u32 bus_width;
int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
- if (!host->parent || !host->parent->of_node)
+ if (!dev || !dev_fwnode(dev))
return 0;
- np = host->parent->of_node;
-
/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
- if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+ if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
dev_dbg(host->parent,
"\"bus-width\" property is missing, assuming 1 bit.\n");
bus_width = 1;
@@ -210,7 +209,7 @@ int mmc_of_parse(struct mmc_host *host)
}
/* f_max is obtained from the optional "max-frequency" property */
- of_property_read_u32(np, "max-frequency", &host->f_max);
+ device_property_read_u32(dev, "max-frequency", &host->f_max);
/*
* Configure CD and WP pins. They are both by default active low to
@@ -225,12 +224,12 @@ int mmc_of_parse(struct mmc_host *host)
*/
/* Parse Card Detection */
- if (of_property_read_bool(np, "non-removable")) {
+ if (device_property_read_bool(dev, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
- cd_cap_invert = of_property_read_bool(np, "cd-inverted");
+ cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
- if (of_property_read_bool(np, "broken-cd"))
+ if (device_property_read_bool(dev, "broken-cd"))
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
@@ -256,7 +255,7 @@ int mmc_of_parse(struct mmc_host *host)
}
/* Parse Write Protection */
- ro_cap_invert = of_property_read_bool(np, "wp-inverted");
+ ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
if (!ret)
@@ -264,64 +263,64 @@ int mmc_of_parse(struct mmc_host *host)
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
- if (of_property_read_bool(np, "disable-wp"))
+ if (device_property_read_bool(dev, "disable-wp"))
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
/* See the comment on CD inversion above */
if (ro_cap_invert ^ ro_gpio_invert)
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- if (of_property_read_bool(np, "cap-sd-highspeed"))
+ if (device_property_read_bool(dev, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
- if (of_property_read_bool(np, "cap-mmc-highspeed"))
+ if (device_property_read_bool(dev, "cap-mmc-highspeed"))
host->caps |= MMC_CAP_MMC_HIGHSPEED;
- if (of_property_read_bool(np, "sd-uhs-sdr12"))
+ if (device_property_read_bool(dev, "sd-uhs-sdr12"))
host->caps |= MMC_CAP_UHS_SDR12;
- if (of_property_read_bool(np, "sd-uhs-sdr25"))
+ if (device_property_read_bool(dev, "sd-uhs-sdr25"))
host->caps |= MMC_CAP_UHS_SDR25;
- if (of_property_read_bool(np, "sd-uhs-sdr50"))
+ if (device_property_read_bool(dev, "sd-uhs-sdr50"))
host->caps |= MMC_CAP_UHS_SDR50;
- if (of_property_read_bool(np, "sd-uhs-sdr104"))
+ if (device_property_read_bool(dev, "sd-uhs-sdr104"))
host->caps |= MMC_CAP_UHS_SDR104;
- if (of_property_read_bool(np, "sd-uhs-ddr50"))
+ if (device_property_read_bool(dev, "sd-uhs-ddr50"))
host->caps |= MMC_CAP_UHS_DDR50;
- if (of_property_read_bool(np, "cap-power-off-card"))
+ if (device_property_read_bool(dev, "cap-power-off-card"))
host->caps |= MMC_CAP_POWER_OFF_CARD;
- if (of_property_read_bool(np, "cap-mmc-hw-reset"))
+ if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
host->caps |= MMC_CAP_HW_RESET;
- if (of_property_read_bool(np, "cap-sdio-irq"))
+ if (device_property_read_bool(dev, "cap-sdio-irq"))
host->caps |= MMC_CAP_SDIO_IRQ;
- if (of_property_read_bool(np, "full-pwr-cycle"))
+ if (device_property_read_bool(dev, "full-pwr-cycle"))
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
- if (of_property_read_bool(np, "keep-power-in-suspend"))
+ if (device_property_read_bool(dev, "keep-power-in-suspend"))
host->pm_caps |= MMC_PM_KEEP_POWER;
- if (of_property_read_bool(np, "wakeup-source") ||
- of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
+ if (device_property_read_bool(dev, "wakeup-source") ||
+ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
- if (of_property_read_bool(np, "mmc-ddr-3_3v"))
+ if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
host->caps |= MMC_CAP_3_3V_DDR;
- if (of_property_read_bool(np, "mmc-ddr-1_8v"))
+ if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
- if (of_property_read_bool(np, "mmc-ddr-1_2v"))
+ if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
host->caps |= MMC_CAP_1_2V_DDR;
- if (of_property_read_bool(np, "mmc-hs200-1_8v"))
+ if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
- if (of_property_read_bool(np, "mmc-hs200-1_2v"))
+ if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
- if (of_property_read_bool(np, "mmc-hs400-1_8v"))
+ if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
- if (of_property_read_bool(np, "mmc-hs400-1_2v"))
+ if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
- if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+ if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
host->caps2 |= MMC_CAP2_HS400_ES;
- if (of_property_read_bool(np, "no-sdio"))
+ if (device_property_read_bool(dev, "no-sdio"))
host->caps2 |= MMC_CAP2_NO_SDIO;
- if (of_property_read_bool(np, "no-sd"))
+ if (device_property_read_bool(dev, "no-sd"))
host->caps2 |= MMC_CAP2_NO_SD;
- if (of_property_read_bool(np, "no-mmc"))
+ if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
- host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+ host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
"device tree specified broken value for DSR: 0x%x, ignoring\n",
@@ -379,6 +378,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
+ INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
/*
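Every of_property_read_*() call above becomes a device_property_read_*() call, which resolves through dev_fwnode(): on device-tree systems it reads the same properties as before, while ACPI platforms can now supply the equivalent properties via _DSD. A minimal sketch of the resulting pattern for a host driver (illustrative; the property names are the ones used above):

    #include <linux/property.h>
    #include <linux/mmc/host.h>

    static void parse_caps_sketch(struct device *dev, struct mmc_host *host)
    {
            u32 bus_width;

            /* The same call covers both DT and ACPI-provided properties */
            if (device_property_read_u32(dev, "bus-width", &bus_width))
                    bus_width = 1;  /* property missing: assume 1 bit */

            if (device_property_read_bool(dev, "cap-sd-highspeed"))
                    host->caps |= MMC_CAP_SD_HIGHSPEED;
    }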
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2c87dede5841..4ffea14b7eb6 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -27,6 +27,7 @@
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"
+#include "pwrseq.h"
#define DEFAULT_CMD6_TIMEOUT_MS 500
@@ -1555,10 +1556,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
/*
* Fetch CID from card.
*/
- if (mmc_host_is_spi(host))
- err = mmc_send_cid(host, cid);
- else
- err = mmc_all_send_cid(host, cid);
+ err = mmc_send_cid(host, cid);
if (err)
goto err;
@@ -1653,12 +1651,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_erase_size(card);
}
- /*
- * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
- * bit. This bit will be lost every time after a reset or power off.
- */
- if (card->ext_csd.partition_setting_completed ||
- (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
+ /* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
+ if (card->ext_csd.rev >= 3) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ERASE_GROUP_DEF, 1,
card->ext_csd.generic_cmd6_time);
@@ -2096,7 +2090,7 @@ static int mmc_runtime_resume(struct mmc_host *host)
return 0;
}
-int mmc_can_reset(struct mmc_card *card)
+static int mmc_can_reset(struct mmc_card *card)
{
u8 rst_n_function;
@@ -2105,7 +2099,6 @@ int mmc_can_reset(struct mmc_card *card)
return 0;
return 1;
}
-EXPORT_SYMBOL(mmc_can_reset);
static int mmc_reset(struct mmc_host *host)
{
@@ -2127,6 +2120,7 @@ static int mmc_reset(struct mmc_host *host)
} else {
/* Do a brute force power cycle */
mmc_power_cycle(host, card->ocr);
+ mmc_pwrseq_reset(host);
}
return mmc_init_card(host, card->ocr, card);
}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 78f75f00efc5..5f7c5920231a 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,6 +19,7 @@
#include <linux/mmc/mmc.h>
#include "core.h"
+#include "card.h"
#include "host.h"
#include "mmc_ops.h"
@@ -54,7 +55,7 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
-int mmc_send_status(struct mmc_card *card, u32 *status)
+int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
struct mmc_command cmd = {};
@@ -64,7 +65,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
if (err)
return err;
@@ -76,6 +77,12 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
return 0;
}
+EXPORT_SYMBOL_GPL(__mmc_send_status);
+
+int mmc_send_status(struct mmc_card *card, u32 *status)
+{
+ return __mmc_send_status(card, status, MMC_CMD_RETRIES);
+}
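With the retry count now a parameter, pollers (like card_busy_detect() in block.c) and one-shot callers share a single implementation, and mmc_send_status() keeps its old behaviour by passing MMC_CMD_RETRIES. A sketch of a polling loop built on the new export (illustrative only; card_busy_detect() is the real in-tree user):

    #include <linux/mmc/card.h>
    #include <linux/mmc/mmc.h>
    #include "mmc_ops.h"

    /* Issue CMD13 until the card is back in the transfer state */
    static int wait_for_tran_state(struct mmc_card *card, int tries)
    {
            u32 status;
            int err;

            while (tries--) {
                    err = __mmc_send_status(card, &status, 0);
                    if (err)
                            return err;
                    if (R1_CURRENT_STATE(status) == R1_STATE_TRAN)
                            return 0;
            }
            return -ETIMEDOUT;
    }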
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
@@ -200,24 +207,6 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
return err;
}
-int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
-{
- int err;
- struct mmc_command cmd = {};
-
- cmd.opcode = MMC_ALL_SEND_CID;
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
-
- err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- memcpy(cid, cmd.resp, sizeof(u32) * 4);
-
- return 0;
-}
-
int mmc_set_relative_addr(struct mmc_card *card)
{
struct mmc_command cmd = {};
@@ -302,15 +291,11 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
return 0;
}
-int mmc_send_csd(struct mmc_card *card, u32 *csd)
+static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
int ret, i;
__be32 *csd_tmp;
- if (!mmc_host_is_spi(card->host))
- return mmc_send_cxd_native(card->host, card->rca << 16,
- csd, MMC_SEND_CSD);
-
csd_tmp = kzalloc(16, GFP_KERNEL);
if (!csd_tmp)
return -ENOMEM;
@@ -327,18 +312,20 @@ err:
return ret;
}
-int mmc_send_cid(struct mmc_host *host, u32 *cid)
+int mmc_send_csd(struct mmc_card *card, u32 *csd)
+{
+ if (mmc_host_is_spi(card->host))
+ return mmc_spi_send_csd(card, csd);
+
+ return mmc_send_cxd_native(card->host, card->rca << 16, csd,
+ MMC_SEND_CSD);
+}
+
+static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
int ret, i;
__be32 *cid_tmp;
- if (!mmc_host_is_spi(host)) {
- if (!host->card)
- return -EINVAL;
- return mmc_send_cxd_native(host, host->card->rca << 16,
- cid, MMC_SEND_CID);
- }
-
cid_tmp = kzalloc(16, GFP_KERNEL);
if (!cid_tmp)
return -ENOMEM;
@@ -355,6 +342,14 @@ err:
return ret;
}
+int mmc_send_cid(struct mmc_host *host, u32 *cid)
+{
+ if (mmc_host_is_spi(host))
+ return mmc_spi_send_cid(host, cid);
+
+ return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
+}
+
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
int err;
@@ -800,7 +795,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
-int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
struct mmc_command cmd = {};
unsigned int opcode;
@@ -834,11 +829,208 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
return 0;
}
+/**
+ * mmc_interrupt_hpi - issue a High Priority Interrupt
+ * @card: the MMC card associated with the HPI transfer
+ *
+ * Issue a High Priority Interrupt and poll the card status
+ * until the card exits the programming state.
+ */
+int mmc_interrupt_hpi(struct mmc_card *card)
+{
+ int err;
+ u32 status;
+ unsigned long prg_wait;
+
+ if (!card->ext_csd.hpi_en) {
+ pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
+ return 1;
+ }
+
+ mmc_claim_host(card->host);
+ err = mmc_send_status(card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ goto out;
+ }
+
+ switch (R1_CURRENT_STATE(status)) {
+ case R1_STATE_IDLE:
+ case R1_STATE_READY:
+ case R1_STATE_STBY:
+ case R1_STATE_TRAN:
+ /*
+ * In idle and transfer states, HPI is not needed and the caller
+ * can issue the next intended command immediately
+ */
+ goto out;
+ case R1_STATE_PRG:
+ break;
+ default:
+ /* In all other states, it's illegal to issue HPI */
+ pr_debug("%s: HPI cannot be sent. Card state=%d\n",
+ mmc_hostname(card->host), R1_CURRENT_STATE(status));
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mmc_send_hpi_cmd(card, &status);
+ if (err)
+ goto out;
+
+ prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
+ do {
+ err = mmc_send_status(card, &status);
+
+ if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
+ break;
+ if (time_after(jiffies, prg_wait))
+ err = -ETIMEDOUT;
+ } while (!err);
+
+out:
+ mmc_release_host(card->host);
+ return err;
+}
+
int mmc_can_ext_csd(struct mmc_card *card)
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
+/**
+ * mmc_stop_bkops - stop ongoing BKOPS
+ * @card: MMC card to check BKOPS
+ *
+ * Send HPI command to stop ongoing background operations to
+ * allow rapid servicing of foreground operations, e.g. read/
+ * writes. Wait until the card comes out of the programming state
+ * to avoid errors in servicing read/write requests.
+ */
+int mmc_stop_bkops(struct mmc_card *card)
+{
+ int err = 0;
+
+ err = mmc_interrupt_hpi(card);
+
+ /*
+ * If err is EINVAL, we can't issue an HPI.
+ * It should complete the BKOPS.
+ */
+ if (!err || (err == -EINVAL)) {
+ mmc_card_clr_doing_bkops(card);
+ mmc_retune_release(card->host);
+ err = 0;
+ }
+
+ return err;
+}
+
+static int mmc_read_bkops_status(struct mmc_card *card)
+{
+ int err;
+ u8 *ext_csd;
+
+ mmc_claim_host(card->host);
+ err = mmc_get_ext_csd(card, &ext_csd);
+ mmc_release_host(card->host);
+ if (err)
+ return err;
+
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+ card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+ kfree(ext_csd);
+ return 0;
+}
+
+/**
+ * mmc_start_bkops - start BKOPS for supported cards
+ * @card: MMC card to start BKOPS
+ * @from_exception: A flag to indicate if this function was
+ *	called due to an exception raised by the card
+ *
+ * Start background operations whenever requested.
+ * When the urgent BKOPS bit is set in an R1 command response
+ * then background operations should be started immediately.
+ */
+void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+{
+ int err;
+ int timeout;
+ bool use_busy_signal;
+
+ if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
+ return;
+
+ err = mmc_read_bkops_status(card);
+ if (err) {
+ pr_err("%s: Failed to read bkops status: %d\n",
+ mmc_hostname(card->host), err);
+ return;
+ }
+
+ if (!card->ext_csd.raw_bkops_status)
+ return;
+
+ if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
+ from_exception)
+ return;
+
+ mmc_claim_host(card->host);
+ if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
+ timeout = MMC_OPS_TIMEOUT_MS;
+ use_busy_signal = true;
+ } else {
+ timeout = 0;
+ use_busy_signal = false;
+ }
+
+ mmc_retune_hold(card->host);
+
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BKOPS_START, 1, timeout, 0,
+ use_busy_signal, true, false);
+ if (err) {
+ pr_warn("%s: Error %d starting bkops\n",
+ mmc_hostname(card->host), err);
+ mmc_retune_release(card->host);
+ goto out;
+ }
+
+ /*
+ * For urgent bkops status (LEVEL_2 and more)
+ * bkops executed synchronously, otherwise
+ * the operation is in progress
+ */
+ if (!use_busy_signal)
+ mmc_card_set_doing_bkops(card);
+ else
+ mmc_retune_release(card->host);
+out:
+ mmc_release_host(card->host);
+}
+
+/*
+ * Flush the cache to the non-volatile storage.
+ */
+int mmc_flush_cache(struct mmc_card *card)
+{
+ int err = 0;
+
+ if (mmc_card_mmc(card) &&
+ (card->ext_csd.cache_size > 0) &&
+ (card->ext_csd.cache_ctrl & 1)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1, 0);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(card->host), err);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_flush_cache);
+
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 978bd2e60f8a..a1390d486381 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -22,15 +22,14 @@ int mmc_deselect_cards(struct mmc_host *host);
int mmc_set_dsr(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
-int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
int mmc_set_relative_addr(struct mmc_card *card);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
+int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
-int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
int mmc_interrupt_hpi(struct mmc_card *card);
int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
@@ -42,9 +41,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms);
int mmc_stop_bkops(struct mmc_card *card);
-int mmc_read_bkops_status(struct mmc_card *card);
void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-int mmc_can_reset(struct mmc_card *card);
int mmc_flush_cache(struct mmc_card *card);
int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index fd1b4b8510b9..7a304a6e5bf1 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -3220,8 +3220,6 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
debugfs_remove(file);
- dev_err(&card->dev,
- "Can't allocate memory for internal usage.\n");
return -ENOMEM;
}
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 9386c4771814..e3ad30fa8307 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -76,6 +76,14 @@ void mmc_pwrseq_power_off(struct mmc_host *host)
pwrseq->ops->power_off(host);
}
+void mmc_pwrseq_reset(struct mmc_host *host)
+{
+ struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+ if (pwrseq && pwrseq->ops->reset)
+ pwrseq->ops->reset(host);
+}
+
void mmc_pwrseq_free(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 39c911aa6ebb..819386f4ec61 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -18,6 +18,7 @@ struct mmc_pwrseq_ops {
void (*pre_power_on)(struct mmc_host *host);
void (*post_power_on)(struct mmc_host *host);
void (*power_off)(struct mmc_host *host);
+ void (*reset)(struct mmc_host *host);
};
struct mmc_pwrseq {
@@ -36,6 +37,7 @@ int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
+void mmc_pwrseq_reset(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);
#else
@@ -49,6 +51,7 @@ static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_power_off(struct mmc_host *host) {}
+static inline void mmc_pwrseq_reset(struct mmc_host *host) {}
static inline void mmc_pwrseq_free(struct mmc_host *host) {}
#endif
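The new hook gives power-sequence providers a dedicated reset entry point, which the core now calls from mmc_hw_reset_for_init() and after a brute-force power cycle in mmc_reset() (see the core.c and mmc.c hunks above), instead of overloading post_power_on the way pwrseq_emmc used to. A provider-side sketch (hypothetical driver; the GPIO field and timings are assumptions modelled on pwrseq_emmc):

    #include <linux/gpio/consumer.h>
    #include <linux/delay.h>
    #include <linux/mmc/host.h>
    #include "pwrseq.h"

    struct my_pwrseq {
            struct mmc_pwrseq pwrseq;
            struct gpio_desc *reset_gpio;   /* hypothetical RST_n line */
    };

    static void my_pwrseq_reset(struct mmc_host *host)
    {
            struct my_pwrseq *p = container_of(host->pwrseq,
                                               struct my_pwrseq, pwrseq);

            /* Pulse RST_n; pwrseq_emmc uses 1 us assert, 200 us recovery */
            gpiod_set_value(p->reset_gpio, 1);
            udelay(1);
            gpiod_set_value(p->reset_gpio, 0);
            udelay(200);
    }

    static const struct mmc_pwrseq_ops my_pwrseq_ops = {
            .reset = my_pwrseq_reset,
    };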
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index adc9c0c614fb..efb8a7965dd4 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -56,7 +56,7 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
}
static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
- .post_power_on = mmc_pwrseq_emmc_reset,
+ .reset = mmc_pwrseq_emmc_reset,
};
static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 5c37b6be3e7b..affa7370ba82 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,35 +40,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_OK;
}
-struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
- struct request *req)
-{
- struct mmc_queue_req *mqrq;
- int i = ffz(mq->qslots);
-
- if (i >= mq->qdepth)
- return NULL;
-
- mqrq = &mq->mqrq[i];
- WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
- test_bit(mqrq->task_id, &mq->qslots));
- mqrq->req = req;
- mq->qcnt += 1;
- __set_bit(mqrq->task_id, &mq->qslots);
-
- return mqrq;
-}
-
-void mmc_queue_req_free(struct mmc_queue *mq,
- struct mmc_queue_req *mqrq)
-{
- WARN_ON(!mqrq->req || mq->qcnt < 1 ||
- !test_bit(mqrq->task_id, &mq->qslots));
- mqrq->req = NULL;
- mq->qcnt -= 1;
- __clear_bit(mqrq->task_id, &mq->qslots);
-}
-
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
@@ -133,7 +104,7 @@ static void mmc_request_fn(struct request_queue *q)
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
req->rq_flags |= RQF_QUIET;
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
}
return;
}
@@ -149,11 +120,11 @@ static void mmc_request_fn(struct request_queue *q)
wake_up_process(mq->thread);
}
-static struct scatterlist *mmc_alloc_sg(int sg_len)
+static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
struct scatterlist *sg;
- sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+ sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
if (sg)
sg_init_table(sg, sg_len);
@@ -179,86 +150,11 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
-{
- kfree(mqrq->bounce_sg);
- mqrq->bounce_sg = NULL;
-
- kfree(mqrq->sg);
- mqrq->sg = NULL;
-
- kfree(mqrq->bounce_buf);
- mqrq->bounce_buf = NULL;
-}
-
-static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
-{
- int i;
-
- for (i = 0; i < qdepth; i++)
- mmc_queue_req_free_bufs(&mqrq[i]);
-}
-
-static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
-{
- mmc_queue_reqs_free_bufs(mqrq, qdepth);
- kfree(mqrq);
-}
-
-static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
-{
- struct mmc_queue_req *mqrq;
- int i;
-
- mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
- if (mqrq) {
- for (i = 0; i < qdepth; i++)
- mqrq[i].task_id = i;
- }
-
- return mqrq;
-}
-
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
- unsigned int bouncesz)
-{
- int i;
-
- for (i = 0; i < qdepth; i++) {
- mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mqrq[i].bounce_buf)
- return -ENOMEM;
-
- mqrq[i].sg = mmc_alloc_sg(1);
- if (!mqrq[i].sg)
- return -ENOMEM;
-
- mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
- if (!mqrq[i].bounce_sg)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
- unsigned int bouncesz)
-{
- int ret;
-
- ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
- if (ret)
- mmc_queue_reqs_free_bufs(mqrq, qdepth);
-
- return !ret;
-}
-
static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
- if (host->max_segs != 1)
+ if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
return 0;
if (bouncesz > host->max_req_size)
@@ -273,84 +169,58 @@ static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
return bouncesz;
}
-#else
-static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
- int qdepth, unsigned int bouncesz)
-{
- return false;
-}
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
- return 0;
-}
-#endif
-
-static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
- int max_segs)
+/**
+ * mmc_init_request() - initialize the MMC-specific per-request data
+ * @q: the request queue
+ * @req: the request
+ * @gfp: memory allocation policy
+ */
+static int mmc_init_request(struct request_queue *q, struct request *req,
+ gfp_t gfp)
{
- int i;
+ struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
- for (i = 0; i < qdepth; i++) {
- mqrq[i].sg = mmc_alloc_sg(max_segs);
- if (!mqrq[i].sg)
+ if (card->bouncesz) {
+ mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
+ if (!mq_rq->bounce_buf)
+ return -ENOMEM;
+ if (card->bouncesz > 512) {
+ mq_rq->sg = mmc_alloc_sg(1, gfp);
+ if (!mq_rq->sg)
+ return -ENOMEM;
+ mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
+ gfp);
+ if (!mq_rq->bounce_sg)
+ return -ENOMEM;
+ }
+ } else {
+ mq_rq->bounce_buf = NULL;
+ mq_rq->bounce_sg = NULL;
+ mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+ if (!mq_rq->sg)
return -ENOMEM;
}
return 0;
}
-void mmc_queue_free_shared_queue(struct mmc_card *card)
+static void mmc_exit_request(struct request_queue *q, struct request *req)
{
- if (card->mqrq) {
- mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
- card->mqrq = NULL;
- }
-}
-
-static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
-{
- struct mmc_host *host = card->host;
- struct mmc_queue_req *mqrq;
- unsigned int bouncesz;
- int ret = 0;
-
- if (card->mqrq)
- return -EINVAL;
+ struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- mqrq = mmc_queue_alloc_mqrqs(qdepth);
- if (!mqrq)
- return -ENOMEM;
-
- card->mqrq = mqrq;
- card->qdepth = qdepth;
-
- bouncesz = mmc_queue_calc_bouncesz(host);
-
- if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
- bouncesz = 0;
- pr_warn("%s: unable to allocate bounce buffers\n",
- mmc_card_name(card));
- }
+ /* It is OK to kfree(NULL) so this will be smooth */
+ kfree(mq_rq->bounce_sg);
+ mq_rq->bounce_sg = NULL;
- card->bouncesz = bouncesz;
+ kfree(mq_rq->bounce_buf);
+ mq_rq->bounce_buf = NULL;
- if (!bouncesz) {
- ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
- if (ret)
- goto out_err;
- }
-
- return ret;
-
-out_err:
- mmc_queue_free_shared_queue(card);
- return ret;
-}
-
-int mmc_queue_alloc_shared_queue(struct mmc_card *card)
-{
- return __mmc_queue_alloc_shared_queue(card, 2);
+ kfree(mq_rq->sg);
+ mq_rq->sg = NULL;
}
/**
@@ -373,13 +243,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
- mq->queue = blk_init_queue(mmc_request_fn, lock);
+ mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
return -ENOMEM;
-
- mq->mqrq = card->mqrq;
- mq->qdepth = card->qdepth;
+ mq->queue->queue_lock = lock;
+ mq->queue->request_fn = mmc_request_fn;
+ mq->queue->init_rq_fn = mmc_init_request;
+ mq->queue->exit_rq_fn = mmc_exit_request;
+ mq->queue->cmd_size = sizeof(struct mmc_queue_req);
mq->queue->queuedata = mq;
+ mq->qcnt = 0;
+ ret = blk_init_allocated_queue(mq->queue);
+ if (ret) {
+ blk_cleanup_queue(mq->queue);
+ return ret;
+ }
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -387,8 +265,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
+ card->bouncesz = mmc_queue_calc_bouncesz(host);
if (card->bouncesz) {
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
blk_queue_max_segments(mq->queue, card->bouncesz / 512);
blk_queue_max_segment_size(mq->queue, card->bouncesz);
@@ -413,7 +291,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
return 0;
cleanup_queue:
- mq->mqrq = NULL;
blk_cleanup_queue(mq->queue);
return ret;
}
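This is the heart of the conversion: instead of blk_init_queue() plus a driver-managed mqrq[] pool, the queue is assembled by hand so that cmd_size, init_rq_fn and exit_rq_fn are in place before blk_init_allocated_queue() allocates the request pool. The block layer then embeds one struct mmc_queue_req behind every struct request and calls the init/exit hooks across each request's lifetime. The setup reduced to its moving parts (error handling trimmed; the helper name is this sketch's):

    #include <linux/blkdev.h>

    static struct request_queue *alloc_queue_with_pdu(spinlock_t *lock,
                                                      void *queuedata)
    {
            struct request_queue *q;

            q = blk_alloc_queue(GFP_KERNEL);
            if (!q)
                    return NULL;

            q->queue_lock = lock;
            q->request_fn = mmc_request_fn;     /* legacy dispatch callback */
            q->init_rq_fn = mmc_init_request;   /* alloc per-request sg/bounce */
            q->exit_rq_fn = mmc_exit_request;   /* and free it again */
            q->cmd_size = sizeof(struct mmc_queue_req); /* PDU size */
            q->queuedata = queuedata;

            if (blk_init_allocated_queue(q)) {
                    blk_cleanup_queue(q);
                    return NULL;
            }
            return q;
    }

With the PDU owned by the block core, the fixed queue depth of 2 and the qslots bitmap disappear; qcnt survives only as a transitional counter (see the FIXME in queue.h below).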
@@ -435,7 +312,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- mq->mqrq = NULL;
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
@@ -492,12 +368,13 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
+ struct request *req = mmc_queue_req_to_req(mqrq);
int i;
if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ return blk_rq_map_sg(mq->queue, req, mqrq->sg);
- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+ sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
@@ -519,7 +396,7 @@ void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mqrq->req) != WRITE)
+ if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
return;
sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
@@ -535,7 +412,7 @@ void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mqrq->req) != READ)
+ if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
return;
sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
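/*
 * Editorial sketch, not part of this patch: how the helpers above pair
 * up around a single bounced transfer in the issue path (block.c is the
 * real caller; this is a simplified outline):
 *
 *	sg_len = mmc_queue_map_sg(mq, mqrq);	maps bounce_sg when bouncing
 *	mmc_queue_bounce_pre(mqrq);		WRITE: request pages -> bounce_buf
 *	... start the transfer and wait for completion ...
 *	mmc_queue_bounce_post(mqrq);		READ: bounce_buf -> request pages
 */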
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 871796c3f406..361b46408e0f 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -3,19 +3,25 @@
#include <linux/types.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
-static inline bool mmc_req_is_special(struct request *req)
+static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
- return req &&
- (req_op(req) == REQ_OP_FLUSH ||
- req_op(req) == REQ_OP_DISCARD ||
- req_op(req) == REQ_OP_SECURE_ERASE);
+ return blk_mq_rq_to_pdu(rq);
+}
+
+struct mmc_queue_req;
+
+static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
+{
+ return blk_mq_rq_from_pdu(mqr);
}
struct task_struct;
struct mmc_blk_data;
+struct mmc_blk_ioc_data;
struct mmc_blk_request {
struct mmc_request mrq;
@@ -26,15 +32,27 @@ struct mmc_blk_request {
int retune_retry_done;
};
+/**
+ * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
+ * @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
+ */
+enum mmc_drv_op {
+ MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_BOOT_WP,
+};
+
struct mmc_queue_req {
- struct request *req;
struct mmc_blk_request brq;
struct scatterlist *sg;
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req areq;
- int task_id;
+ enum mmc_drv_op drv_op;
+ int drv_op_result;
+ struct mmc_blk_ioc_data **idata;
+ unsigned int ioc_count;
};
struct mmc_queue {
@@ -45,14 +63,15 @@ struct mmc_queue {
bool asleep;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
- struct mmc_queue_req *mqrq;
- int qdepth;
+ /*
+ * FIXME: this counter is not a very reliable way of keeping
+	 * track of how many requests are in flight. Switch to just
+ * letting the block core keep track of requests and per-request
+ * associated mmc_queue_req data.
+ */
int qcnt;
- unsigned long qslots;
};
-extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
-extern void mmc_queue_free_shared_queue(struct mmc_card *card);
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
@@ -66,8 +85,4 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_access_rpmb(struct mmc_queue *);
-extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
- struct request *);
-extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);
-
#endif
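/*
 * Editorial sketch, not part of this patch: one plausible shape for an
 * issue-path handler that consumes drv_op (the real handler lives in
 * block.c; "foo_" names and the ioctl/boot-wp bodies are hypothetical
 * placeholders).
 */
static void foo_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	int ret;

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		/* run the mq_rq->ioc_count entries in mq_rq->idata */
		ret = foo_do_ioctls(mq->card, mq_rq->idata, mq_rq->ioc_count);
		break;
	case MMC_DRV_OP_BOOT_WP:
		/* set the power-on write protect bit for boot partitions */
		ret = foo_boot_wp(mq->card);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	/* the submitter waiting in blk_execute_rq() reads this back */
	mq_rq->drv_op_result = ret;
	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}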
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d109634fbfce..a1b0aa14d5e3 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -294,12 +294,8 @@ static int mmc_read_switch(struct mmc_card *card)
err = -EIO;
status = kmalloc(64, GFP_KERNEL);
- if (!status) {
- pr_err("%s: could not allocate a buffer for "
- "switch capabilities.\n",
- mmc_hostname(card->host));
+ if (!status)
return -ENOMEM;
- }
/*
* Find out the card's support bits with a mode 0 operation.
@@ -359,11 +355,8 @@ int mmc_sd_switch_hs(struct mmc_card *card)
return 0;
status = kmalloc(64, GFP_KERNEL);
- if (!status) {
- pr_err("%s: could not allocate a buffer for "
- "switch capabilities.\n", mmc_hostname(card->host));
+ if (!status)
return -ENOMEM;
- }
err = mmc_sd_switch(card, 1, 0, 1, status);
if (err)
@@ -596,11 +589,8 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
return 0;
status = kmalloc(64, GFP_KERNEL);
- if (!status) {
- pr_err("%s: could not allocate a buffer for "
- "switch capabilities.\n", mmc_hostname(card->host));
+ if (!status)
return -ENOMEM;
- }
/* Set 4-bit bus width */
if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
@@ -798,11 +788,7 @@ try_again:
}
}
- if (mmc_host_is_spi(host))
- err = mmc_send_cid(host, cid);
- else
- err = mmc_all_send_cid(host, cid);
-
+ err = mmc_send_cid(host, cid);
return err;
}
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index fae732c870a9..cc43687ca241 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1104,6 +1104,12 @@ int mmc_attach_sdio(struct mmc_host *host)
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
/*
+ * Do not allow runtime suspend until after SDIO function
+ * devices are added.
+ */
+ pm_runtime_get_noresume(&card->dev);
+
+ /*
* Let runtime PM core know our card is active
*/
err = pm_runtime_set_active(&card->dev);
@@ -1155,19 +1161,23 @@ int mmc_attach_sdio(struct mmc_host *host)
goto remove_added;
}
+ if (host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_put(&card->dev);
+
mmc_claim_host(host);
return 0;
-remove_added:
- /* Remove without lock if the device has been added. */
- mmc_sdio_remove(host);
- mmc_claim_host(host);
remove:
- /* And with lock if it hasn't been added. */
mmc_release_host(host);
- if (host->card)
- mmc_sdio_remove(host);
+remove_added:
+	/*
+	 * The devices are being deleted, so there is no need to disable
+	 * runtime PM. Likewise, don't pm_runtime_put() the SDIO card: it
+	 * must stay active while any probed function devices are removed,
+	 * after which it is deleted anyway.
+	 */
+ mmc_sdio_remove(host);
mmc_claim_host(host);
err:
mmc_detach_bus(host);
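/*
 * Editorial sketch, not part of this patch: the runtime PM pairing
 * introduced above, in general form. The get/put must balance, and
 * _noresume is safe here because the card is known to be active:
 *
 *	pm_runtime_get_noresume(dev);	usage++, without waking the device
 *	... add and probe child devices; runtime suspend is held off ...
 *	pm_runtime_put(dev);		usage--, device may now idle
 */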
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 6d4b72080d51..c771843e4c15 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -95,12 +95,30 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
void sdio_run_irqs(struct mmc_host *host)
{
mmc_claim_host(host);
- host->sdio_irq_pending = true;
- process_sdio_pending_irqs(host);
+ if (host->sdio_irqs) {
+ host->sdio_irq_pending = true;
+ process_sdio_pending_irqs(host);
+ if (host->ops->ack_sdio_irq)
+ host->ops->ack_sdio_irq(host);
+ }
mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);
+void sdio_irq_work(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, sdio_irq_work.work);
+
+ sdio_run_irqs(host);
+}
+
+void sdio_signal_irq(struct mmc_host *host)
+{
+ queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+}
+EXPORT_SYMBOL_GPL(sdio_signal_irq);
+
static int sdio_irq_thread(void *_host)
{
struct mmc_host *host = _host;
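/*
 * Editorial sketch, not part of this patch: how a host controller
 * driver might use sdio_signal_irq() from its hard IRQ handler and
 * re-enable the hardware interrupt from ->ack_sdio_irq(), which the
 * core invokes after processing the pending function IRQs. "foo_"
 * names are hypothetical.
 */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_host *foo = dev_id;

	if (foo_sdio_irq_pending(foo)) {
		foo_mask_sdio_irq(foo);		/* quiet the level-triggered IRQ */
		sdio_signal_irq(foo->mmc);	/* kick sdio_irq_work */
	}
	return IRQ_HANDLED;
}

static void foo_ack_sdio_irq(struct mmc_host *mmc)
{
	foo_unmask_sdio_irq(mmc_priv(mmc));
}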
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index ee35cb4d170e..96945cafbf0b 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -17,6 +17,7 @@
struct mmc_host;
struct mmc_card;
+struct work_struct;
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
@@ -25,6 +26,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
+void sdio_irq_work(struct work_struct *work);
static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
{
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index a8450a8701e4..863f1dbbfc1b 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -151,6 +151,8 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
if (irq < 0)
host->caps |= MMC_CAP_NEEDS_POLL;
+ else if ((host->caps & MMC_CAP_CD_WAKE) && !enable_irq_wake(irq))
+ host->slot.cd_wake_enabled = true;
}
EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
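/*
 * Editorial sketch, not part of this patch: with MMC_CAP_CD_WAKE set by
 * a host driver, the hunk above arms the card-detect GPIO IRQ as a
 * system-wakeup source via enable_irq_wake() when the core requests the
 * IRQ (the "cd" con_id is the usual convention, assumed here):
 *
 *	mmc->caps |= MMC_CAP_CD_WAKE;
 *	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
 */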