Diffstat (limited to 'drivers/mmc/core')
-rw-r--r--  drivers/mmc/core/block.c    |  11
-rw-r--r--  drivers/mmc/core/core.c     |  22
-rw-r--r--  drivers/mmc/core/core.h     |   9
-rw-r--r--  drivers/mmc/core/debugfs.c  |   1
-rw-r--r--  drivers/mmc/core/host.c     |   3
-rw-r--r--  drivers/mmc/core/mmc.c      |  68
-rw-r--r--  drivers/mmc/core/mmc_ops.c  | 163
-rw-r--r--  drivers/mmc/core/mmc_ops.h  |  12
-rw-r--r--  drivers/mmc/core/sd.c       | 481
-rw-r--r--  drivers/mmc/core/sd_ops.c   |  38
-rw-r--r--  drivers/mmc/core/sdio.c     |   6
11 files changed, 636 insertions(+), 178 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 689eb9afeeed..88f4c215caa6 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for media (i.e., flash cards)
*
@@ -1004,6 +1005,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ if (card->ext_csd.cmdq_en) {
+ ret = mmc_cmdq_disable(card);
+ if (ret)
+ break;
+ }
+ fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
@@ -1014,6 +1021,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
/* Always switch back to main area after RPMB access */
if (rpmb_ioctl)
mmc_blk_part_switch(card, 0);
+ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
+ mmc_cmdq_enable(card);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1159,7 +1168,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
int ret = 0;
- ret = mmc_flush_cache(card);
+ ret = mmc_flush_cache(card->host);
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f194940c5974..b039dcff17f8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1582,7 +1582,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
{
struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
- bool use_r1b_resp = false;
+ bool use_r1b_resp;
int err;
mmc_retune_hold(card->host);
@@ -1650,23 +1650,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
busy_timeout = mmc_erase_timeout(card, arg, qty);
- /*
- * If the host controller supports busy signalling and the timeout for
- * the erase operation does not exceed the max_busy_timeout, we should
- * use R1B response. Or we need to prevent the host from doing hw busy
- * detection, which is done by converting to a R1 response instead.
- * Note, some hosts requires R1B, which also means they are on their own
- * when it comes to deal with the busy timeout.
- */
- if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
- card->host->max_busy_timeout &&
- busy_timeout > card->host->max_busy_timeout) {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = busy_timeout;
- use_r1b_resp = true;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
@@ -1687,7 +1671,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
goto out;
/* Let's poll to find out when the erase operation completes. */
- err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);
+ err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
out:
mmc_retune_release(card->host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index db3c9c68875d..0c4de2030b3f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -30,6 +30,7 @@ struct mmc_bus_ops {
int (*hw_reset)(struct mmc_host *);
int (*sw_reset)(struct mmc_host *);
bool (*cache_enabled)(struct mmc_host *);
+ int (*flush_cache)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -172,4 +173,12 @@ static inline bool mmc_cache_enabled(struct mmc_host *host)
return false;
}
+static inline int mmc_flush_cache(struct mmc_host *host)
+{
+ if (host->bus_ops->flush_cache)
+ return host->bus_ops->flush_cache(host);
+
+ return 0;
+}
+
#endif
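
The new ->flush_cache bus op follows the same pattern as ->cache_enabled just above it: an optional per-bus callback that the core wraps in a static inline, falling back to a harmless "nothing to do" result when the bus type doesn't implement it. A tiny standalone illustration of that optional-ops dispatch pattern (the struct and names here are simplified stand-ins, not the real mmc structures):

#include <stdio.h>

/* Simplified stand-in for struct mmc_bus_ops: callbacks may be NULL. */
struct bus_ops {
	int (*flush_cache)(void *host);
};

static int sd_flush(void *host)
{
	puts("sd: flushing cache");
	return 0;
}

static const struct bus_ops sd_ops = { .flush_cache = sd_flush };
static const struct bus_ops sdio_ops = { .flush_cache = NULL }; /* no cache */

/* Mirrors the inline wrapper: dispatch if implemented, else succeed. */
static int flush_cache(const struct bus_ops *ops, void *host)
{
	if (ops->flush_cache)
		return ops->flush_cache(host);

	return 0;
}

int main(void)
{
	printf("sd:   %d\n", flush_cache(&sd_ops, NULL));
	printf("sdio: %d\n", flush_cache(&sdio_ops, NULL));
	return 0;
}
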
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9ec84c86c46a..3fdbc801e64a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -26,6 +26,7 @@
static DECLARE_FAULT_ATTR(fail_default_attr);
static char *fail_request;
module_param(fail_request, charp, 0);
+MODULE_PARM_DESC(fail_request, "default fault injection attributes");
#endif /* CONFIG_FAIL_MMC_REQUEST */
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0b0577990ddc..eda4a1892c33 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -388,6 +388,9 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_NO_SD;
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ if (device_property_read_bool(dev, "no-mmc-hs400"))
+ host->caps2 &= ~(MMC_CAP2_HS400_1_8V | MMC_CAP2_HS400_1_2V |
+ MMC_CAP2_HS400_ES);
/* Must be after "non-removable" check */
if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 8674c3e0c02c..838726b68ff3 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -28,6 +28,7 @@
#define DEFAULT_CMD6_TIMEOUT_MS 500
#define MIN_CACHE_EN_TIMEOUT_MS 1600
+#define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
@@ -1905,11 +1906,20 @@ static int mmc_can_sleep(struct mmc_card *card)
return card->ext_csd.rev >= 3;
}
+static int mmc_sleep_busy_cb(void *cb_data, bool *busy)
+{
+ struct mmc_host *host = cb_data;
+
+ *busy = host->ops->card_busy(host);
+ return 0;
+}
+
static int mmc_sleep(struct mmc_host *host)
{
struct mmc_command cmd = {};
struct mmc_card *card = host->card;
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ bool use_r1b_resp;
int err;
/* Re-tuning can't be done once the card is deselected */
@@ -1922,35 +1932,27 @@ static int mmc_sleep(struct mmc_host *host)
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
cmd.arg |= 1 << 15;
-
- /*
- * If the max_busy_timeout of the host is specified, validate it against
- * the sleep cmd timeout. A failure means we need to prevent the host
- * from doing hw busy detection, which is done by converting to a R1
- * response instead of a R1B. Note, some hosts requires R1B, which also
- * means they are on their own when it comes to deal with the busy
- * timeout.
- */
- if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout)) {
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto out_release;
/*
- * If the host does not wait while the card signals busy, then we will
- * will have to wait the sleep/awake timeout. Note, we cannot use the
- * SEND_STATUS command to poll the status because that command (and most
- * others) is invalid while the card sleeps.
+ * If the host does not wait while the card signals busy, then we can
+ * try to poll, but only if the host supports HW polling, as the
+ * SEND_STATUS cmd is not allowed. If we can't poll, then we simply need
+ * to wait the sleep/awake timeout.
*/
- if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
+ goto out_release;
+
+ if (!host->ops->card_busy) {
mmc_delay(timeout_ms);
+ goto out_release;
+ }
+
+ err = __mmc_poll_for_busy(card, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
@@ -2035,6 +2037,25 @@ static bool _mmc_cache_enabled(struct mmc_host *host)
host->card->ext_csd.cache_ctrl & 1;
}
+/*
+ * Flush the internal cache of the eMMC to non-volatile storage.
+ */
+static int _mmc_flush_cache(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (_mmc_cache_enabled(host)) {
+ err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(host), err);
+ }
+
+ return err;
+}
+
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
@@ -2046,7 +2067,7 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
if (mmc_card_suspended(host->card))
goto out;
- err = mmc_flush_cache(host->card);
+ err = _mmc_flush_cache(host);
if (err)
goto out;
@@ -2187,7 +2208,7 @@ static int _mmc_hw_reset(struct mmc_host *host)
* In the case of recovery, we can't expect flushing the cache to work
* always, but we have a go and ignore errors.
*/
- mmc_flush_cache(host->card);
+ _mmc_flush_cache(host);
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
mmc_can_reset(card)) {
@@ -2215,6 +2236,7 @@ static const struct mmc_bus_ops mmc_ops = {
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
+ .flush_cache = _mmc_flush_cache,
};
/*
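
With the hunks above, mmc_sleep() picks one of three strategies after issuing SLEEP_AWAKE: let the controller handle the busy signalling (R1B plus MMC_CAP_WAIT_WHILE_BUSY), poll DAT0 through the host's ->card_busy() ops via __mmc_poll_for_busy(), or simply wait out the sleep/awake timeout, since SEND_STATUS polling is not allowed while the card sleeps. A minimal userspace sketch of that decision only; the flag value and helper names are illustrative, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define CAP_WAIT_WHILE_BUSY (1u << 0)	/* host HW waits out R1B busy */

/* Hypothetical stand-in for the few host fields involved. */
struct host {
	unsigned int caps;
	bool (*card_busy)(struct host *h);	/* NULL if not supported */
};

static void sleep_busy_handling(struct host *h, bool used_r1b,
				unsigned int timeout_ms)
{
	if ((h->caps & CAP_WAIT_WHILE_BUSY) && used_r1b) {
		puts("controller already waited on DAT0 - nothing to do");
		return;
	}
	if (!h->card_busy) {
		printf("no ->card_busy(): wait the full %u ms timeout\n",
		       timeout_ms);
		return;
	}
	puts("poll ->card_busy() until the card releases DAT0");
}

int main(void)
{
	struct host h = { .caps = 0, .card_busy = NULL };

	sleep_busy_handling(&h, false, 1500);	/* falls back to a fixed delay */
	return 0;
}
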
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 5756781fef37..973756ed4016 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -20,7 +20,6 @@
#include "mmc_ops.h"
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
-#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
static const u8 tuning_blk_pattern_4bit[] = {
@@ -53,6 +52,12 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
+struct mmc_busy_data {
+ struct mmc_card *card;
+ bool retry_crc_err;
+ enum mmc_busy_cmd busy_cmd;
+};
+
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
@@ -246,9 +251,8 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
* NOTE: void *buf, caller for the buf is required to use DMA-capable
* buffer or on-stack buffer (with some overhead in callee).
*/
-static int
-mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
- u32 opcode, void *buf, unsigned len)
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
@@ -259,7 +263,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
mrq.data = &data;
cmd.opcode = opcode;
- cmd.arg = 0;
+ cmd.arg = args;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
@@ -305,7 +309,7 @@ static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
if (!cxd_tmp)
return -ENOMEM;
- ret = mmc_send_cxd_data(NULL, host, opcode, cxd_tmp, 16);
+ ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
if (ret)
goto err;
@@ -353,7 +357,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!ext_csd)
return -ENOMEM;
- err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
+ err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
512);
if (err)
kfree(ext_csd);
@@ -424,10 +428,10 @@ int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
return mmc_switch_status_error(card->host, status);
}
-static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
- enum mmc_busy_cmd busy_cmd, bool *busy)
+static int mmc_busy_cb(void *cb_data, bool *busy)
{
- struct mmc_host *host = card->host;
+ struct mmc_busy_data *data = cb_data;
+ struct mmc_host *host = data->card->host;
u32 status = 0;
int err;
@@ -436,22 +440,23 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
return 0;
}
- err = mmc_send_status(card, &status);
- if (retry_crc_err && err == -EILSEQ) {
+ err = mmc_send_status(data->card, &status);
+ if (data->retry_crc_err && err == -EILSEQ) {
*busy = true;
return 0;
}
if (err)
return err;
- switch (busy_cmd) {
+ switch (data->busy_cmd) {
case MMC_BUSY_CMD6:
- err = mmc_switch_status_error(card->host, status);
+ err = mmc_switch_status_error(host, status);
break;
case MMC_BUSY_ERASE:
err = R1_STATUS(status) ? -EIO : 0;
break;
case MMC_BUSY_HPI:
+ case MMC_BUSY_EXTR_SINGLE:
break;
default:
err = -EINVAL;
@@ -464,9 +469,9 @@ static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
return 0;
}
-static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- bool send_status, bool retry_crc_err,
- enum mmc_busy_cmd busy_cmd)
+int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data)
{
struct mmc_host *host = card->host;
int err;
@@ -475,16 +480,6 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool expired = false;
bool busy = false;
- /*
- * In cases when not allowed to poll by using CMD13 or because we aren't
- * capable of polling by using ->card_busy(), then rely on waiting the
- * stated timeout to be sufficient.
- */
- if (!send_status && !host->ops->card_busy) {
- mmc_delay(timeout_ms);
- return 0;
- }
-
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
/*
@@ -493,7 +488,7 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
*/
expired = time_after(jiffies, timeout);
- err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
+ err = (*busy_cb)(cb_data, &busy);
if (err)
return err;
@@ -516,9 +511,36 @@ static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- enum mmc_busy_cmd busy_cmd)
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
+{
+ struct mmc_busy_data cb_data;
+
+ cb_data.card = card;
+ cb_data.retry_crc_err = retry_crc_err;
+ cb_data.busy_cmd = busy_cmd;
+
+ return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
+}
+
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms)
{
- return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
+ /*
+ * If the max_busy_timeout of the host is specified, make sure it's
+ * enough to fit the used timeout_ms. In case it's not, let's instruct
+ * the host to avoid HW busy detection, by converting to a R1 response
+ * instead of a R1B. Note, some hosts require R1B, which also means
+ * they are on their own when it comes to dealing with the busy timeout.
+ */
+ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+ (timeout_ms > host->max_busy_timeout)) {
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
+ return false;
+ }
+
+ cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ cmd->busy_timeout = timeout_ms;
+ return true;
}
/**
@@ -543,7 +565,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {};
- bool use_r1b_resp = true;
+ bool use_r1b_resp;
unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
@@ -554,29 +576,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
timeout_ms = card->ext_csd.generic_cmd6_time;
}
- /*
- * If the max_busy_timeout of the host is specified, make sure it's
- * enough to fit the used timeout_ms. In case it's not, let's instruct
- * the host to avoid HW busy detection, by converting to a R1 response
- * instead of a R1B. Note, some hosts requires R1B, which also means
- * they are on their own when it comes to deal with the busy timeout.
- */
- if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) |
(value << 8) |
set;
- cmd.flags = MMC_CMD_AC;
- if (use_r1b_resp) {
- cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
- }
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, retries);
if (err)
@@ -587,9 +592,18 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
mmc_host_is_spi(host))
goto out_tim;
+ /*
+ * If the host doesn't support HW polling via the ->card_busy() ops, and
+ * it's not allowed to poll by using CMD13, then we need to rely on
+ * waiting the stated timeout to be sufficient.
+ */
+ if (!send_status && !host->ops->card_busy) {
+ mmc_delay(timeout_ms);
+ goto out_tim;
+ }
+
/* Let's try to poll to find out when the command is completed. */
- err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
- MMC_BUSY_CMD6);
+ err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
if (err)
goto out;
@@ -686,7 +700,7 @@ out:
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
struct mmc_command cmd = {};
@@ -709,7 +723,7 @@ int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
return mmc_wait_for_cmd(host, &cmd, 0);
}
-EXPORT_SYMBOL_GPL(mmc_abort_tuning);
+EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
@@ -813,28 +827,17 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
{
unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
struct mmc_host *host = card->host;
- bool use_r1b_resp = true;
+ bool use_r1b_resp = false;
struct mmc_command cmd = {};
int err;
cmd.opcode = card->ext_csd.hpi_cmd;
cmd.arg = card->rca << 16 | 1;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- /*
- * Make sure the host's max_busy_timeout fit the needed timeout for HPI.
- * In case it doesn't, let's instruct the host to avoid HW busy
- * detection, by using a R1 response instead of R1B.
- */
- if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
- use_r1b_resp = false;
-
- if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = busy_timeout_ms;
- } else {
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- use_r1b_resp = false;
- }
+ if (cmd.opcode == MMC_STOP_TRANSMISSION)
+ use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
+ busy_timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err) {
@@ -848,7 +851,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
return 0;
/* Let's poll to find out when the HPI request completes. */
- return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
+ return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
/**
@@ -961,26 +964,6 @@ void mmc_run_bkops(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_run_bkops);
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
- int err = 0;
-
- if (mmc_cache_enabled(card->host)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1,
- MMC_CACHE_FLUSH_TIMEOUT_MS);
- if (err)
- pr_err("%s: cache flush error %d\n",
- mmc_hostname(card->host), err);
- }
-
- return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
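
The central refactor in mmc_ops.c is that __mmc_poll_for_busy() no longer knows about CMD13, CRC retries or the busy-cmd type; it just loops until a caller-supplied callback reports "not busy", an error, or the timeout expires, while mmc_poll_for_busy() keeps the old CMD13-based behaviour by packing those details into mmc_busy_cb(). A compact userspace model of that loop's shape, assuming a monotonic-clock helper in place of jiffies (names and the fake callback are illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

typedef int (*busy_cb_t)(void *cb_data, bool *busy);

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Shape of __mmc_poll_for_busy(): loop until !busy, an error, or timeout. */
static int poll_for_busy(unsigned int timeout_ms, busy_cb_t cb, void *cb_data)
{
	long deadline = now_ms() + timeout_ms;
	bool busy, expired;
	int err;

	do {
		/* Sample expiry before the callback, so a slow status check
		 * still gets one final chance to report "not busy". */
		expired = now_ms() > deadline;

		err = cb(cb_data, &busy);
		if (err)
			return err;

		if (expired && busy)
			return -1;	/* -ETIMEDOUT in the kernel */
	} while (busy);

	return 0;
}

/* Example callback: pretend the card stays busy for a few samples. */
static int fake_busy_cb(void *cb_data, bool *busy)
{
	int *remaining = cb_data;

	*busy = (*remaining)-- > 0;
	return 0;
}

int main(void)
{
	int samples = 3;

	printf("poll result: %d\n", poll_for_busy(100, fake_busy_cb, &samples));
	return 0;
}
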
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 7bc1cfb0654c..41ab4f573a31 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -14,10 +14,12 @@ enum mmc_busy_cmd {
MMC_BUSY_CMD6,
MMC_BUSY_ERASE,
MMC_BUSY_HPI,
+ MMC_BUSY_EXTR_SINGLE,
};
struct mmc_host;
struct mmc_card;
+struct mmc_command;
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
@@ -25,6 +27,8 @@ int mmc_set_dsr(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_set_relative_addr(struct mmc_card *card);
+int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
+ u32 args, void *buf, unsigned len);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
int mmc_send_status(struct mmc_card *card, u32 *status);
@@ -35,15 +39,19 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_can_ext_csd(struct mmc_card *card);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
+bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ unsigned int timeout_ms);
+int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
+ int (*busy_cb)(void *cb_data, bool *busy),
+ void *cb_data);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
- enum mmc_busy_cmd busy_cmd);
+ bool retry_crc_err, enum mmc_busy_cmd busy_cmd);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
bool send_status, bool retry_crc_err, unsigned int retries);
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms);
void mmc_run_bkops(struct mmc_card *card);
-int mmc_flush_cache(struct mmc_card *card);
int mmc_cmdq_enable(struct mmc_card *card);
int mmc_cmdq_disable(struct mmc_card *card);
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 2c48d6504101..4646b7a03db6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -66,6 +66,14 @@ static const unsigned int sd_au_size[] = {
__res & __mask; \
})
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
+
+struct sd_busy_data {
+ struct mmc_card *card;
+ u8 *reg_buf;
+};
+
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -222,7 +230,9 @@ static int mmc_decode_scr(struct mmc_card *card)
else
card->erased_byte = 0x0;
- if (scr->sda_spec3)
+ if (scr->sda_spec4)
+ scr->cmds = UNSTUFF_BITS(resp, 32, 4);
+ else if (scr->sda_spec3)
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
@@ -847,11 +857,13 @@ try_again:
return err;
/*
- * In case CCS and S18A in the response is set, start Signal Voltage
- * Switch procedure. SPI mode doesn't support CMD11.
+ * In case the S18A bit is set in the response, let's start the signal
+ * voltage switch procedure. SPI mode doesn't support CMD11.
+ * Note that, according to the spec, the S18A bit is not valid unless
+ * the CCS bit is set as well. We deliberately deviate from the spec in
+ * regard to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr &&
- ((*rocr & 0x41000000) == 0x41000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -994,6 +1006,380 @@ static bool mmc_sd_card_using_v18(struct mmc_card *card)
(SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
}
+static int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
+ u8 reg_data)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ /*
+ * Arguments of CMD49:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] MW - mask write mode (0 = disable).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte).
+ */
+ cmd.arg = fno << 27 | page << 18 | offset << 9;
+
+ /* The first byte in the buffer is the data to be written. */
+ reg_buf[0] = reg_data;
+
+ data.flags = MMC_DATA_WRITE;
+ data.blksz = 512;
+ data.blocks = 1;
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, reg_buf, 512);
+
+ cmd.opcode = SD_WRITE_EXTR_SINGLE;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mmc_set_data_timeout(&data, card);
+ mmc_wait_for_req(host, &mrq);
+
+ kfree(reg_buf);
+
+ /*
+ * Note that the SD card is allowed to signal busy on DAT0 for up to 1s
+ * after CMD49, but let's leave this to be managed by the
+ * caller.
+ */
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+static int sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset, u16 len, u8 *reg_buf)
+{
+ u32 cmd_args;
+
+ /*
+ * Command arguments of CMD48:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] reserved (0).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte, 1ff = 512 bytes).
+ */
+ cmd_args = fno << 27 | page << 18 | offset << 9 | (len - 1);
+
+ return mmc_send_adtc_data(card, card->host, SD_READ_EXTR_SINGLE,
+ cmd_args, reg_buf, 512);
+}
+
+static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset)
+{
+ int err;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /* Read the extension register for power management function. */
+ err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading PM func of ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* PM revision consists of 4 bits. */
+ card->ext_power.rev = reg_buf[0] & 0xf;
+
+ /* Power Off Notification support at bit 4. */
+ if (reg_buf[1] & BIT(4))
+ card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
+
+ /* Power Sustenance support at bit 5. */
+ if (reg_buf[1] & BIT(5))
+ card->ext_power.feature_support |= SD_EXT_POWER_SUSTENANCE;
+
+ /* Power Down Mode support at bit 6. */
+ if (reg_buf[1] & BIT(6))
+ card->ext_power.feature_support |= SD_EXT_POWER_DOWN_MODE;
+
+ card->ext_power.fno = fno;
+ card->ext_power.page = page;
+ card->ext_power.offset = offset;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset)
+{
+ int err;
+ u8 *reg_buf;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading PERF func of ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* PERF revision. */
+ card->ext_perf.rev = reg_buf[0];
+
+ /* FX_EVENT support at bit 0. */
+ if (reg_buf[1] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_FX_EVENT;
+
+ /* Card initiated self-maintenance support at bit 0. */
+ if (reg_buf[2] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CARD_MAINT;
+
+ /* Host initiated self-maintenance support at bit 1. */
+ if (reg_buf[2] & BIT(1))
+ card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
+
+ /* Cache support at bit 0. */
+ if (reg_buf[4] & BIT(0))
+ card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
+
+ /* Command queue support indicated via queue depth bits (0 to 4). */
+ if (reg_buf[6] & 0x1f)
+ card->ext_perf.feature_support |= SD_EXT_PERF_CMD_QUEUE;
+
+ card->ext_perf.fno = fno;
+ card->ext_perf.page = page;
+ card->ext_perf.offset = offset;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_parse_ext_reg(struct mmc_card *card, u8 *gen_info_buf,
+ u16 *next_ext_addr)
+{
+ u8 num_regs, fno, page;
+ u16 sfc, offset, ext = *next_ext_addr;
+ u32 reg_addr;
+
+ /*
+ * Parse only one register set per extension, as that is sufficient to
+ * support the standard functions. This means another 48 bytes in the
+ * buffer must be available.
+ */
+ if (ext + 48 > 512)
+ return -EFAULT;
+
+ /* Standard Function Code */
+ memcpy(&sfc, &gen_info_buf[ext], 2);
+
+ /* Address to the next extension. */
+ memcpy(next_ext_addr, &gen_info_buf[ext + 40], 2);
+
+ /* Number of registers for this extension. */
+ num_regs = gen_info_buf[ext + 42];
+
+ /* We support only one register per extension. */
+ if (num_regs != 1)
+ return 0;
+
+ /* Extension register address. */
+ memcpy(&reg_addr, &gen_info_buf[ext + 44], 4);
+
+ /* 9 bits (0 to 8) contain the offset address. */
+ offset = reg_addr & 0x1ff;
+
+ /* 8 bits (9 to 16) contain the page number. */
+ page = reg_addr >> 9 & 0xff;
+
+ /* 4 bits (18 to 21) contain the function number. */
+ fno = reg_addr >> 18 & 0xf;
+
+ /* Standard Function Code for power management. */
+ if (sfc == 0x1)
+ return sd_parse_ext_reg_power(card, fno, page, offset);
+
+ /* Standard Function Code for performance enhancement. */
+ if (sfc == 0x2)
+ return sd_parse_ext_reg_perf(card, fno, page, offset);
+
+ return 0;
+}
+
+static int sd_read_ext_regs(struct mmc_card *card)
+{
+ int err, i;
+ u8 num_ext, *gen_info_buf;
+ u16 rev, len, next_ext_addr;
+
+ if (mmc_host_is_spi(card->host))
+ return 0;
+
+ if (!(card->scr.cmds & SD_SCR_CMD48_SUPPORT))
+ return 0;
+
+ gen_info_buf = kzalloc(512, GFP_KERNEL);
+ if (!gen_info_buf)
+ return -ENOMEM;
+
+ /*
+ * Read 512 bytes of general info, which is found at function number 0,
+ * at page 0 and with no offset.
+ */
+ err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
+ if (err) {
+ pr_warn("%s: error %d reading general info of SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ /* General info structure revision. */
+ memcpy(&rev, &gen_info_buf[0], 2);
+
+ /* Length of general info in bytes. */
+ memcpy(&len, &gen_info_buf[2], 2);
+
+ /* Number of extensions to be found. */
+ num_ext = gen_info_buf[4];
+
+ /* We support revision 0, but limit it to 512 bytes for simplicity. */
+ if (rev != 0 || len > 512) {
+ pr_warn("%s: non-supported SD ext reg layout\n",
+ mmc_hostname(card->host));
+ goto out;
+ }
+
+ /*
+ * Parse the extension registers. The first extension should start
+ * immediately after the general info header (16 bytes).
+ */
+ next_ext_addr = 16;
+ for (i = 0; i < num_ext; i++) {
+ err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
+ if (err) {
+ pr_warn("%s: error %d parsing SD ext reg\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+ }
+
+out:
+ kfree(gen_info_buf);
+ return err;
+}
+
+static bool sd_cache_enabled(struct mmc_host *host)
+{
+ return host->card->ext_perf.feature_enabled & SD_EXT_PERF_CACHE;
+}
+
+static int sd_flush_cache(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u8 *reg_buf, fno, page;
+ u16 offset;
+ int err;
+
+ if (!sd_cache_enabled(host))
+ return 0;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set Flush Cache at bit 0 in the performance enhancement register at
+ * 261 bytes offset.
+ */
+ fno = card->ext_perf.fno;
+ page = card->ext_perf.page;
+ offset = card->ext_perf.offset + 261;
+
+ err = sd_write_ext_reg(card, fno, page, offset, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Cache Flush bit\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
+ /*
+ * Read the Flush Cache bit. The card shall reset it, to confirm that
+ * it has completed the flushing of the cache.
+ */
+ err = sd_read_ext_reg(card, fno, page, offset, 1, reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading Cache Flush bit\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ if (reg_buf[0] & BIT(0))
+ err = -ETIMEDOUT;
+out:
+ kfree(reg_buf);
+ return err;
+}
+
+static int sd_enable_cache(struct mmc_card *card)
+{
+ u8 *reg_buf;
+ int err;
+
+ card->ext_perf.feature_enabled &= ~SD_EXT_PERF_CACHE;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set Cache Enable at bit 0 in the performance enhancement register at
+ * 260 bytes offset.
+ */
+ err = sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
+ card->ext_perf.offset + 260, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Cache Enable bit\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (!err)
+ card->ext_perf.feature_enabled |= SD_EXT_PERF_CACHE;
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
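
The SD extension-register helpers added above all rely on the same 32-bit argument layout for CMD48 (READ_EXTR_SINGLE) and CMD49 (WRITE_EXTR_SINGLE), and sd_parse_ext_reg() unpacks the matching FNO/page/offset triple from the register address found in the general information area. A standalone sketch of just that bit packing and unpacking; the concrete fno/page/offset values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* CMD48/CMD49 argument: [31] MIO, [30:27] FNO, [26] MW (CMD49) / reserved,
 * [25:18] page, [17:9] offset, [8:0] length - 1. */
static uint32_t extr_single_arg(uint8_t fno, uint8_t page, uint16_t offset,
				uint16_t len)
{
	return (uint32_t)fno << 27 | (uint32_t)page << 18 |
	       (uint32_t)(offset & 0x1ff) << 9 | ((len - 1) & 0x1ff);
}

/* Reverse of what sd_parse_ext_reg() does with the 32-bit register address
 * read from the general information area. */
static void decode_reg_addr(uint32_t reg_addr)
{
	uint16_t offset = reg_addr & 0x1ff;
	uint8_t page = reg_addr >> 9 & 0xff;
	uint8_t fno = reg_addr >> 18 & 0xf;

	printf("fno=%u page=%u offset=%u\n", fno, page, offset);
}

int main(void)
{
	/* Read 512 bytes of the general information area:
	 * fno = 0, page = 0, offset = 0, len = 512 -> length field 0x1ff. */
	printf("CMD48 arg: 0x%08x\n",
	       (unsigned)extr_single_arg(0, 0, 0, 512));

	/* Write one byte, e.g. a Flush Cache bit at offset + 261. */
	printf("CMD49 arg: 0x%08x\n",
	       (unsigned)extr_single_arg(2, 0, 261, 1));

	decode_reg_addr(2u << 18 | 0u << 9 | 261);
	return 0;
}
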
@@ -1142,6 +1528,20 @@ retry:
}
}
+ if (!oldcard) {
+ /* Read/parse the extension registers. */
+ err = sd_read_ext_regs(card);
+ if (err)
+ goto free_card;
+ }
+
+ /* Enable internal SD cache if supported. */
+ if (card->ext_perf.feature_support & SD_EXT_PERF_CACHE) {
+ err = sd_enable_cache(card);
+ if (err)
+ goto free_card;
+ }
+
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
@@ -1213,21 +1613,84 @@ static void mmc_sd_detect(struct mmc_host *host)
}
}
+static int sd_can_poweroff_notify(struct mmc_card *card)
+{
+ return card->ext_power.feature_support & SD_EXT_POWER_OFF_NOTIFY;
+}
+
+static int sd_busy_poweroff_notify_cb(void *cb_data, bool *busy)
+{
+ struct sd_busy_data *data = cb_data;
+ struct mmc_card *card = data->card;
+ int err;
+
+ /*
+ * Read the status register for the power management function. It's at
+ * one byte offset and is one byte long. The Power Off Notification
+ * Ready is bit 0.
+ */
+ err = sd_read_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ card->ext_power.offset + 1, 1, data->reg_buf);
+ if (err) {
+ pr_warn("%s: error %d reading status reg of PM func\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+
+ *busy = !(data->reg_buf[0] & BIT(0));
+ return 0;
+}
+
+static int sd_poweroff_notify(struct mmc_card *card)
+{
+ struct sd_busy_data cb_data;
+ u8 *reg_buf;
+ int err;
+
+ reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ /*
+ * Set the Power Off Notification bit in the power management settings
+ * register at 2 bytes offset.
+ */
+ err = sd_write_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ card->ext_power.offset + 2, BIT(0));
+ if (err) {
+ pr_warn("%s: error %d writing Power Off Notify bit\n",
+ mmc_hostname(card->host), err);
+ goto out;
+ }
+
+ cb_data.card = card;
+ cb_data.reg_buf = reg_buf;
+ err = __mmc_poll_for_busy(card, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+ &sd_busy_poweroff_notify_cb, &cb_data);
+
+out:
+ kfree(reg_buf);
+ return err;
+}
+
static int _mmc_sd_suspend(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int err = 0;
mmc_claim_host(host);
- if (mmc_card_suspended(host->card))
+ if (mmc_card_suspended(card))
goto out;
- if (!mmc_host_is_spi(host))
+ if (sd_can_poweroff_notify(card))
+ err = sd_poweroff_notify(card);
+ else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
if (!err) {
mmc_power_off(host);
- mmc_card_set_suspended(host->card);
+ mmc_card_set_suspended(card);
}
out:
@@ -1331,6 +1794,8 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.alive = mmc_sd_alive,
.shutdown = mmc_sd_suspend,
.hw_reset = mmc_sd_hw_reset,
+ .cache_enabled = sd_cache_enabled,
+ .flush_cache = sd_flush_cache,
};
/*
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index d61ff811218c..ef8d1dce5af1 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -17,6 +17,7 @@
#include "core.h"
#include "sd_ops.h"
+#include "mmc_ops.h"
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
@@ -309,43 +310,18 @@ int mmc_app_send_scr(struct mmc_card *card)
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
- struct mmc_request mrq = {};
- struct mmc_command cmd = {};
- struct mmc_data data = {};
- struct scatterlist sg;
+ u32 cmd_args;
/* NOTE: caller guarantees resp is heap-allocated */
mode = !!mode;
value &= 0xF;
+ cmd_args = mode << 31 | 0x00FFFFFF;
+ cmd_args &= ~(0xF << (group * 4));
+ cmd_args |= value << (group * 4);
- mrq.cmd = &cmd;
- mrq.data = &data;
-
- cmd.opcode = SD_SWITCH;
- cmd.arg = mode << 31 | 0x00FFFFFF;
- cmd.arg &= ~(0xF << (group * 4));
- cmd.arg |= value << (group * 4);
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
- data.blksz = 64;
- data.blocks = 1;
- data.flags = MMC_DATA_READ;
- data.sg = &sg;
- data.sg_len = 1;
-
- sg_init_one(&sg, resp, 64);
-
- mmc_set_data_timeout(&data, card);
-
- mmc_wait_for_req(card->host, &mrq);
-
- if (cmd.error)
- return cmd.error;
- if (data.error)
- return data.error;
-
- return 0;
+ return mmc_send_adtc_data(card, card->host, SD_SWITCH, cmd_args, resp,
+ 64);
}
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
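
mmc_sd_switch() above now only builds the CMD6 argument and hands the data phase to mmc_send_adtc_data(). The argument starts from mode << 31 | 0x00FFFFFF (all function groups set to 0xF, i.e. "no change") and then patches the requested value into the nibble of the one group being switched. A quick userspace illustration of that masking; the group index and value below are examples only:

#include <stdint.h>
#include <stdio.h>

static uint32_t sd_switch_arg(int mode, int group, uint8_t value)
{
	uint32_t arg;

	mode = !!mode;		/* 0 = check, 1 = switch */
	value &= 0xF;

	arg = (uint32_t)mode << 31 | 0x00FFFFFF; /* all groups: no change */
	arg &= ~(0xFu << (group * 4));		 /* clear the target group */
	arg |= (uint32_t)value << (group * 4);	 /* set the requested value */

	return arg;
}

int main(void)
{
	/* e.g. access mode (function group 1, index 0 here) -> SDR104 (0x3) */
	printf("check:  0x%08x\n", (unsigned)sd_switch_arg(0, 0, 0x3));
	printf("switch: 0x%08x\n", (unsigned)sd_switch_arg(1, 0, 0x3));
	return 0;
}
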
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 3eb94ac2712e..68edf7a615be 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -937,11 +937,9 @@ static void mmc_sdio_detect(struct mmc_host *host)
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
- err = pm_runtime_get_sync(&host->card->dev);
- if (err < 0) {
- pm_runtime_put_noidle(&host->card->dev);
+ err = pm_runtime_resume_and_get(&host->card->dev);
+ if (err < 0)
goto out;
- }
}
mmc_claim_host(host);