Diffstat (limited to 'drivers/ufs/host/ufs-qcom.c')
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 420
1 file changed, 332 insertions(+), 88 deletions(-)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 77f8ddb1f207..76fc70503a62 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -5,6 +5,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/gpio/consumer.h>
@@ -102,8 +103,28 @@ static const struct __ufs_qcom_bw_table {
[MODE_MAX][0][0] = { 7643136, 819200 },
};
+static const struct {
+ int nminor;
+ char *prefix;
+} testbus_info[TSTBUS_MAX] = {
+ [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"},
+ [TSTBUS_UARM] = {32, "TSTBUS_UARM"},
+ [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"},
+ [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"},
+ [TSTBUS_DFC] = {32, "TSTBUS_DFC"},
+ [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"},
+ [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"},
+ [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"},
+ [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"},
+ [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"},
+ [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"},
+ [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"},
+};
+
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -173,7 +194,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
- profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+ profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
profile->dev = dev;
/*
@@ -221,17 +242,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- /* Only AES-256-XTS has been tested so far. */
- if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
- return -EOPNOTSUPP;
-
ufshcd_hold(hba);
- err = qcom_ice_program_key(host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->bytes,
- key->crypto_cfg.data_unit_size / 512,
- slot);
+ err = qcom_ice_program_key(host->ice, slot, key);
ufshcd_release(hba);
return err;
}
@@ -250,9 +262,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
return err;
}
+static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size,
+ sw_secret);
+}
+
+static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key);
+}
+
+static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_generate_key(host->ice, lt_key);
+}
+
+static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key);
+}
+
static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
.keyslot_program = ufs_qcom_ice_keyslot_program,
.keyslot_evict = ufs_qcom_ice_keyslot_evict,
+ .derive_sw_secret = ufs_qcom_ice_derive_sw_secret,
+ .import_key = ufs_qcom_ice_import_key,
+ .generate_key = ufs_qcom_ice_generate_key,
+ .prepare_key = ufs_qcom_ice_prepare_key,
};
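
These ops cover the hardware-wrapped-key lifecycle that blk-crypto defines: a long-term wrapped key is created once (generate_key or import_key), re-wrapped into a per-boot ephemeral key (prepare_key), and a software secret for upper layers is derived from the ephemeral key (derive_sw_secret). A minimal data-flow sketch follows; in practice the blk-crypto core invokes these ops through its key-management interfaces, the buffer sizes below are maxima, error handling is elided, and the ops are assumed to return the resulting key length on success:

	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 secret[BLK_CRYPTO_SW_SECRET_SIZE];
	int ret;

	/* Create (or import) the long-term wrapped key once. */
	ret = ufs_qcom_ice_generate_key(profile, lt_key);

	/* Re-wrap it into an ephemeral key for this boot... */
	ret = ufs_qcom_ice_prepare_key(profile, lt_key, ret, eph_key);

	/* ...which keyslot_program() later hands to the ICE, and from
	 * which the software secret is derived. */
	ret = ufs_qcom_ice_derive_sw_secret(profile, eph_key, ret, secret);
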
#else
@@ -452,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (ret)
return ret;
- if (phy->power_count) {
+ if (phy->power_count)
phy_power_off(phy);
- phy_exit(phy);
- }
+
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
@@ -477,6 +532,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
goto out_disable_phy;
}
+ ret = phy_calibrate(phy);
+ if (ret) {
+ dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret);
+ goto out_disable_phy;
+ }
+
ufs_qcom_select_unipro_mode(host);
return 0;
@@ -497,11 +558,32 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
+ int err;
+
+ /* Enable UTP internal clock gating */
ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
ufshcd_readl(hba, REG_UFS_CFG2);
+
+ /* Enable Unipro internal clock gating */
+ err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+ DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+ PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL);
+out:
+ if (err)
+		dev_err(hba->dev, "failed to enable hw clk gating\n");
}
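
The ufshcd_readl() right after the rmwl above is the usual posted-write flush idiom: reading the register back forces the MMIO write to complete before the DME attributes are touched. As a generic sketch (mask, val and reg are placeholders):

	ufshcd_rmwl(hba, mask, val, reg);	/* posted MMIO write */
	ufshcd_readl(hba, reg);			/* read back to order it before what follows */
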
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -543,13 +625,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
*
* @hba: host controller instance
* @is_pre_scale_up: flag indicating whether this is called before scaling up
+ * @freq: target OPP frequency
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
- unsigned long core_clk_rate = 0;
+ unsigned long clk_freq = 0;
u32 core_clk_cycles_per_us;
/*
@@ -561,22 +644,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk");
+ if (clk_freq)
+ goto cfg_timers;
+ }
+
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
+ if (freq == ULONG_MAX) {
+ clk_freq = clki->max_freq;
+ break;
+ }
+
if (is_pre_scale_up)
- core_clk_rate = clki->max_freq;
+ clk_freq = clki->max_freq;
else
- core_clk_rate = clk_get_rate(clki->clk);
+ clk_freq = clk_get_rate(clki->clk);
break;
}
}
+cfg_timers:
/* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
+ if (clk_freq < DEFAULT_CLK_RATE_HZ)
+ clk_freq = DEFAULT_CLK_RATE_HZ;
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;
if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
/*
@@ -596,13 +691,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
+ if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
+ err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -637,26 +732,17 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
if (status == PRE_CHANGE)
return 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
+ if (!ufs_qcom_is_link_active(hba))
ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
+ /* reset the connected UFS device during power down */
+ if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ ufs_qcom_device_reset_ctrl(hba, true);
return ufs_qcom_ice_suspend(host);
}
@@ -664,26 +750,11 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int err;
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
return ufs_qcom_ice_resume(host);
}
@@ -874,17 +945,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
-
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
@@ -1073,12 +1133,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
+ * Certain clocks come from the PHY, so they need to be managed
+ * together with the controller clocks, which also yields better
+ * power savings. Hence, keep the phy_power_off/on calls in
+ * ufs_qcom_setup_clocks() so that the PHY's regulators and clocks
+ * can be turned on/off along with the UFS clocks.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy;
+ int err;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1088,6 +1156,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
+ phy = host->generic_phy;
+
switch (status) {
case PRE_CHANGE:
if (on) {
@@ -1097,10 +1167,22 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
+
+ err = phy_power_off(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power off failed, ret=%d\n", err);
+ return err;
+ }
}
break;
case POST_CHANGE:
if (on) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power on failed, ret = %d\n", err);
+ return err;
+ }
+
/* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
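
Taken together, the two hunks above give the following sequencing (a summary of this patch's behavior, not additional code), ensuring the PHY is only powered while its parent clocks are running:

	/*
	 * gate   (on == false): PRE_CHANGE  -> drop device ref_clk, phy_power_off()
	 *                       POST_CHANGE -> ufshcd core gates the controller clocks
	 * ungate (on == true):  PRE_CHANGE  -> ufshcd core ungates the controller clocks
	 *                       POST_CHANGE -> phy_power_on(), device ref_clk back on (HS mode)
	 */
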
@@ -1360,29 +1442,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
u32 cycles_in_1us = 0;
u32 core_clk_ctrl_reg;
+ unsigned long clk_freq;
int err;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ if (clk_freq) {
+ cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+ goto set_core_clk_ctrl;
+ }
+ }
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
!strcmp(clki->name, "core_clk_unipro")) {
- if (!clki->max_freq)
+ if (!clki->max_freq) {
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (freq == ULONG_MAX)
+ break;
+ }
+
+ if (freq == ULONG_MAX) {
cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
- else
- cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+ break;
+ }
+
+ if (is_scale_up)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+ else
+ cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
break;
}
}
+set_core_clk_ctrl:
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
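
As a worked example with an illustrative rate (not taken from the patch): core_clk_unipro at 403 MHz gives

	cycles_in_1us = ceil(403000000, HZ_PER_MHZ);	/* = 403 */

and that value is then programmed into the clock-cycle field of DME_VS_CORE_CLK_CTRL below.
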
@@ -1419,13 +1518,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
{
int ret;
- ret = ufs_qcom_cfg_timers(hba, true);
+ ret = ufs_qcom_cfg_timers(hba, true, freq);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1456,8 +1555,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
+ int ret;
+
+ ret = ufs_qcom_cfg_timers(hba, false, freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+ return ret;
+ }
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
@@ -1609,6 +1715,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
return 0;
}
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int i, j, nminor = 0, testbus_len = 0;
+ u32 *testbus __free(kfree) = NULL;
+ char *prefix;
+
+ testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ for (j = 0; j < TSTBUS_MAX; j++) {
+ nminor = testbus_info[j].nminor;
+ prefix = testbus_info[j].prefix;
+ host->testbus.select_major = j;
+ testbus_len = nminor * sizeof(u32);
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ }
+}
+
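
The dump helpers in this patch lean on the scope-based cleanup support from <linux/cleanup.h> (newly included at the top of the file): a pointer declared with __free(kfree) is handed to kfree() automatically when it goes out of scope, so every return path is leak-free without explicit frees. A minimal sketch with a hypothetical helper:

	static int demo_dump(size_t len)
	{
		u32 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;		/* nothing allocated, nothing to free */

		/* ... fill and print buf ... */
		return 0;			/* buf is kfree()d automatically here */
	}
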
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix, enum ufshcd_res id)
+{
+ u32 *regs __free(kfree) = NULL;
+ size_t pos;
+
+ if (offset % 4 != 0 || len % 4 != 0)
+ return -EINVAL;
+
+ regs = kzalloc(len, GFP_ATOMIC);
+ if (!regs)
+ return -ENOMEM;
+
+ for (pos = 0; pos < len; pos += 4)
+ regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, regs, len, false);
+
+ return 0;
+}
+
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+ struct dump_info {
+ size_t offset;
+ size_t len;
+ const char *prefix;
+ enum ufshcd_res id;
+ };
+
+ struct dump_info mcq_dumps[] = {
+ {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+ {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+ {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+ {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+ {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+ {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+ {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+ {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+ {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+ {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+ {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+ ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+ mcq_dumps[i].prefix, mcq_dumps[i].id);
+ cond_resched();
+ }
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
u32 reg;
@@ -1616,6 +1801,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
host = ufshcd_get_variant(hba);
+ dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+ dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+ dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+ ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
@@ -1658,6 +1852,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+ if (hba->mcq_enabled) {
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+ }
+
+	/* Ensure the dumps below run only in task context, since they make blocking calls. */
+ if (in_task()) {
+ /* Dump MCQ Host Vendor Specific Registers */
+ if (hba->mcq_enabled)
+ ufs_qcom_dump_mcq_hci_regs(hba);
+
+		/* voluntarily yield the CPU since we are dumping a large amount of data */
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+ cond_resched();
+ ufs_qcom_dump_testbus(hba);
+ }
}
/**
@@ -1687,7 +1898,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
return 0;
}
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *d)
@@ -1699,13 +1909,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
@@ -1923,8 +2126,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
retain_and_null_ptr(qi);
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
+ if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
}
@@ -1933,11 +2135,53 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return 0;
}
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name)
+{
+ struct ufs_clk_info *clki;
+ struct dev_pm_opp *opp;
+ unsigned long clk_freq;
+ int idx = 0;
+ bool found = false;
+
+ opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+ return 0;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, name)) {
+ found = true;
+ break;
+ }
+
+ idx++;
+ }
+
+ if (!found) {
+ dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+ dev_pm_opp_put(opp);
+ return 0;
+ }
+
+ clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+ dev_pm_opp_put(opp);
+
+ return clk_freq;
+}
+
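The helper above relies on indexed OPPs: with use_pm_opp set, each OPP entry carries one rate per clock in clk_list_head, and a clock's position in that list selects which rate dev_pm_opp_get_freq_indexed() returns. A caller-side sketch using names from this file (a return of 0 means no matching OPP or clock was found):

	unsigned long unipro_hz;

	unipro_hz = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
	if (!unipro_hz) {
		/* fall back to the rates recorded in clk_list_head */
	}
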
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
- u32 gear = 0;
+ u32 gear = UFS_HS_DONT_CHANGE;
+ unsigned long unipro_freq;
- switch (freq) {
+ if (!hba->use_pm_opp)
+ return gear;
+
+ unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ switch (unipro_freq) {
case 403000000:
gear = UFS_HS_G5;
break;
@@ -1957,10 +2201,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
break;
default:
dev_err(hba->dev, "%s: Unsupported clock freq: %lu\n", __func__, freq);
- break;
+ return UFS_HS_DONT_CHANGE;
}
- return gear;
+ return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
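
The min_t() clamp matters on platforms whose link negotiated below the clock table's maximum gear; a worked example with hypothetical values:

	/* The link negotiated G4, but 403 MHz maps to UFS_HS_G5 above: */
	gear = min_t(u32, UFS_HS_G5, hba->max_pwr_info.info.gear_rx);	/* -> UFS_HS_G4 */
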
/*