Diffstat (limited to 'drivers/ufs/host/ufs-qcom.c')
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 239
1 file changed, 162 insertions(+), 77 deletions(-)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 37887ec68412..76fc70503a62 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -122,7 +122,9 @@ static const struct {
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -506,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (ret)
return ret;
- if (phy->power_count) {
+ if (phy->power_count)
phy_power_off(phy);
- phy_exit(phy);
- }
+
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
@@ -531,6 +532,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
goto out_disable_phy;
}
+ ret = phy_calibrate(phy);
+ if (ret) {
+ dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret);
+ goto out_disable_phy;
+ }
+
ufs_qcom_select_unipro_mode(host);
return 0;
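With this hunk, a PHY that is already powered is only powered off, not fully exited, before re-initialization, and an explicit phy_calibrate() step follows power-on. A minimal sketch of the resulting bring-up order, assuming the generic PHY API; error paths are condensed and the helper name is illustrative, not the driver's:

static int ufs_qcom_phy_bringup_sketch(struct ufs_hba *hba, struct phy *phy)
{
	int ret;

	/* a previously powered PHY is powered off, but stays initialized */
	if (phy->power_count)
		phy_power_off(phy);

	ret = phy_init(phy);		/* phy initialization - calibrate the phy */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* start the PHY's power and clocks */
	if (ret)
		goto out_disable_phy;

	ret = phy_calibrate(phy);	/* new: calibrate again after power on */
	if (ret)
		goto out_disable_phy;

	return 0;

out_disable_phy:
	phy_exit(phy);
	return ret;
}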
@@ -551,11 +558,32 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
+ int err;
+
+ /* Enable UTP internal clock gating */
ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
ufshcd_readl(hba, REG_UFS_CFG2);
+
+ /* Enable Unipro internal clock gating */
+ err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+ DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+ PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL);
+out:
+ if (err)
+ dev_err(hba->dev, "hw clk gating enabled failed\n");
}
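ufshcd_dme_rmw() above performs a read-modify-write of a UniPro DME attribute. A minimal sketch of what such a helper does, assuming it is built on ufshcd_dme_get()/ufshcd_dme_set(); the helper's actual definition is not part of this diff:

static int dme_rmw_sketch(struct ufs_hba *hba, u32 mask, u32 val, u32 attr)
{
	u32 cfg;
	int err;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
	if (err)
		return err;

	cfg &= ~mask;		/* clear the targeted field */
	cfg |= (val & mask);	/* program the new value */

	return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
}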
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -597,13 +625,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
*
* @hba: host controller instance
* @is_pre_scale_up: true when called before scaling up the clocks.
+ * @freq: target OPP frequency
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
- unsigned long core_clk_rate = 0;
+ unsigned long clk_freq = 0;
u32 core_clk_cycles_per_us;
/*
@@ -615,22 +644,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk");
+ if (clk_freq)
+ goto cfg_timers;
+ }
+
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
+ if (freq == ULONG_MAX) {
+ clk_freq = clki->max_freq;
+ break;
+ }
+
if (is_pre_scale_up)
- core_clk_rate = clki->max_freq;
+ clk_freq = clki->max_freq;
else
- core_clk_rate = clk_get_rate(clki->clk);
+ clk_freq = clk_get_rate(clki->clk);
break;
}
}
+cfg_timers:
/* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
+ if (clk_freq < DEFAULT_CLK_RATE_HZ)
+ clk_freq = DEFAULT_CLK_RATE_HZ;
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;
if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
/*
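REG_UFS_SYS1CLK_1US simply holds the core clock frequency expressed in cycles per microsecond, clamped to a 1 MHz floor. A worked sketch with an illustrative 300 MHz core_clk, using the names from the hunk above:

	unsigned long clk_freq = 300000000UL;	/* illustrative core_clk rate */
	u32 core_clk_cycles_per_us;

	if (clk_freq < DEFAULT_CLK_RATE_HZ)	/* enforce the 1 MHz minimum */
		clk_freq = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;	/* 300000000 / 1000000 = 300 */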
@@ -650,13 +691,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
+ if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
+ err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -691,26 +732,17 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
if (status == PRE_CHANGE)
return 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
+ if (!ufs_qcom_is_link_active(hba))
ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
+ /* reset the connected UFS device during power down */
+ if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ ufs_qcom_device_reset_ctrl(hba, true);
return ufs_qcom_ice_suspend(host);
}
@@ -718,26 +750,11 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int err;
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
return ufs_qcom_ice_resume(host);
}
@@ -928,17 +945,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
-
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
@@ -1127,12 +1133,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
+ * Certain clocks come from the PHY, so they need to be managed
+ * together with the controller clocks, which also gives better
+ * power savings. Hence, keep the phy_power_off/on calls in
+ * ufs_qcom_setup_clocks so that the PHY's regulators & clks are
+ * turned on/off along with the UFS clocks.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy;
+ int err;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1142,6 +1156,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
+ phy = host->generic_phy;
+
switch (status) {
case PRE_CHANGE:
if (on) {
@@ -1151,10 +1167,22 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
+
+ err = phy_power_off(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power off failed, ret=%d\n", err);
+ return err;
+ }
}
break;
case POST_CHANGE:
if (on) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power on failed, ret = %d\n", err);
+ return err;
+ }
+
/* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
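The net effect is that the PHY is powered off last when clocks are gated (PRE_CHANGE, !on) and powered on first when they are ungated (POST_CHANGE, on), so its regulators and clocks track the controller clocks. A condensed sketch of the ordering implied by the two hunks above:

	/* gating: drop the device ref_clk first, then power off the PHY */
	if (status == PRE_CHANGE && !on)
		err = phy_power_off(phy);

	/* ungating: power on the PHY first, then raise the device ref_clk */
	if (status == POST_CHANGE && on)
		err = phy_power_on(phy);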
@@ -1414,29 +1442,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
u32 cycles_in_1us = 0;
u32 core_clk_ctrl_reg;
+ unsigned long clk_freq;
int err;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ if (clk_freq) {
+ cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+ goto set_core_clk_ctrl;
+ }
+ }
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
!strcmp(clki->name, "core_clk_unipro")) {
- if (!clki->max_freq)
+ if (!clki->max_freq) {
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (freq == ULONG_MAX)
+ break;
+ }
+
+ if (freq == ULONG_MAX) {
cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
- else
- cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+ break;
+ }
+ if (is_scale_up)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+ else
+ cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
break;
}
}
+set_core_clk_ctrl:
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
@@ -1473,13 +1518,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
{
int ret;
- ret = ufs_qcom_cfg_timers(hba, true);
+ ret = ufs_qcom_cfg_timers(hba, true, freq);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1510,8 +1555,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
+ int ret;
+
+ ret = ufs_qcom_cfg_timers(hba, false, freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+ return ret;
+ }
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
@@ -1846,7 +1898,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
return 0;
}
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *d)
@@ -1858,13 +1909,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
@@ -2082,8 +2126,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
retain_and_null_ptr(qi);
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
+ if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
}
@@ -2092,11 +2135,53 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return 0;
}
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name)
+{
+ struct ufs_clk_info *clki;
+ struct dev_pm_opp *opp;
+ unsigned long clk_freq;
+ int idx = 0;
+ bool found = false;
+
+ opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+ return 0;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, name)) {
+ found = true;
+ break;
+ }
+
+ idx++;
+ }
+
+ if (!found) {
+ dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+ dev_pm_opp_put(opp);
+ return 0;
+ }
+
+ clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+ dev_pm_opp_put(opp);
+
+ return clk_freq;
+}
+
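The new helper translates an OPP-table frequency into the rate of one named clock by position: the clock's index in hba->clk_list_head must line up with the order of per-clock rates in the OPP entry, and it returns 0 when no exact OPP or no matching clock is found. A minimal usage sketch; the 403 MHz value is illustrative:

	unsigned long unipro_freq;

	/* resolve the core_clk_unipro rate that pairs with this OPP level */
	unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, 403000000UL, "core_clk_unipro");
	if (!unipro_freq)
		dev_warn(hba->dev, "no exact OPP entry for the requested frequency\n");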
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
- u32 gear = 0;
+ u32 gear = UFS_HS_DONT_CHANGE;
+ unsigned long unipro_freq;
+
+ if (!hba->use_pm_opp)
+ return gear;
- switch (freq) {
+ unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ switch (unipro_freq) {
case 403000000:
gear = UFS_HS_G5;
break;
@@ -2116,10 +2201,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
break;
default:
dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
- break;
+ return UFS_HS_DONT_CHANGE;
}
- return gear;
+ return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
/*
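Two behavioral changes in ufs_qcom_freq_to_gear_speed() are worth noting: an unknown or non-OPP frequency now yields UFS_HS_DONT_CHANGE instead of gear 0, and the looked-up gear is clamped to what the link actually negotiated. A sketch of the clamp, assuming a G5-capable OPP table on a device that negotiated G4:

	u32 gear = UFS_HS_G5;			/* from the 403 MHz OPP entry */

	/* never exceed the negotiated RX gear, e.g. UFS_HS_G4 */
	gear = min_t(u32, gear, hba->max_pwr_info.info.gear_rx);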