Diffstat (limited to 'drivers/ufs')
-rw-r--r--   drivers/ufs/core/ufs-mcq.c            15
-rw-r--r--   drivers/ufs/core/ufs-sysfs.c         195
-rw-r--r--   drivers/ufs/core/ufs_trace.h           1
-rw-r--r--   drivers/ufs/core/ufs_trace_types.h    24
-rw-r--r--   drivers/ufs/core/ufshcd.c            290
-rw-r--r--   drivers/ufs/host/ufs-exynos.c         14
-rw-r--r--   drivers/ufs/host/ufs-mediatek.c      682
-rw-r--r--   drivers/ufs/host/ufs-mediatek.h       33
-rw-r--r--   drivers/ufs/host/ufs-qcom.c          359
-rw-r--r--   drivers/ufs/host/ufs-qcom.h           37
-rw-r--r--   drivers/ufs/host/ufshcd-pci.c         34
-rw-r--r--   drivers/ufs/host/ufshcd-pltfrm.c      33
-rw-r--r--   drivers/ufs/host/ufshcd-pltfrm.h       1
13 files changed, 1302 insertions, 416 deletions
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 1e50675772fe..c9bdd4140fd0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -29,6 +29,10 @@
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000
@@ -243,7 +247,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
&hwq->sqe_dma_addr,
GFP_KERNEL);
- if (!hwq->sqe_dma_addr) {
+ if (!hwq->sqe_base_addr) {
dev_err(hba->dev, "SQE allocation failed\n");
return -ENOMEM;
}
@@ -252,7 +256,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
&hwq->cqe_dma_addr,
GFP_KERNEL);
- if (!hwq->cqe_dma_addr) {
+ if (!hwq->cqe_base_addr) {
dev_err(hba->dev, "CQE allocation failed\n");
return -ENOMEM;
}
@@ -355,9 +359,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
+ u32 intrs;
u16 qsize;
int i;
+ /* Enable required interrupts */
+ intrs = UFSHCD_ENABLE_MCQ_INTRS;
+ if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
+ intrs &= ~MCQ_CQ_EVENT_STATUS;
+ ufshcd_enable_intr(hba, intrs);
+
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->id = i;
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index fcb4b14a710f..0086816b27cd 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -5,6 +5,7 @@
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>
+#include <linux/string_choices.h>
#include <ufs/ufs.h>
#include <ufs/unipro.h>
@@ -87,6 +88,23 @@ static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
}
}
+static const char * const ufs_hid_states[] = {
+ [HID_IDLE] = "idle",
+ [ANALYSIS_IN_PROGRESS] = "analysis_in_progress",
+ [DEFRAG_REQUIRED] = "defrag_required",
+ [DEFRAG_IN_PROGRESS] = "defrag_in_progress",
+ [DEFRAG_COMPLETED] = "defrag_completed",
+ [DEFRAG_NOT_REQUIRED] = "defrag_not_required",
+};
+
+static const char *ufs_hid_state_to_string(enum ufs_hid_state state)
+{
+ if (state < NUM_UFS_HID_STATES)
+ return ufs_hid_states[state];
+
+ return "unknown";
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -494,6 +512,8 @@ static ssize_t pm_qos_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ guard(mutex)(&hba->pm_qos_mutex);
+
return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}
@@ -1499,7 +1519,7 @@ static ssize_t _name##_show(struct device *dev, \
ret = -EINVAL; \
goto out; \
} \
- ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
+ ret = sysfs_emit(buf, "%s\n", str_true_false(flag)); \
out: \
up(&hba->host_sem); \
return ret; \
@@ -1763,6 +1783,178 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
.attrs = ufs_sysfs_attributes,
};
+static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u32 *attr_val)
+{
+ int ret;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t analysis_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ if (sysfs_streq(buf, "enable"))
+ mode = HID_ANALYSIS_ENABLE;
+ else if (sysfs_streq(buf, "disable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_WO(analysis_trigger);
+
+static ssize_t defrag_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ if (sysfs_streq(buf, "enable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_ENABLE;
+ else if (sysfs_streq(buf, "disable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_WO(defrag_trigger);
+
+static ssize_t fragmented_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static DEVICE_ATTR_RO(fragmented_size);
+
+static ssize_t defrag_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static ssize_t defrag_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(defrag_size);
+
+static ssize_t progress_ratio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static DEVICE_ATTR_RO(progress_ratio);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_STATE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value));
+}
+
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ufs_sysfs_hid[] = {
+ &dev_attr_analysis_trigger.attr,
+ &dev_attr_defrag_trigger.attr,
+ &dev_attr_fragmented_size.attr,
+ &dev_attr_defrag_size.attr,
+ &dev_attr_progress_ratio.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return hba->dev_info.hid_sup ? attr->mode : 0;
+}
+
+static const struct attribute_group ufs_sysfs_hid_group = {
+ .name = "hid",
+ .attrs = ufs_sysfs_hid,
+ .is_visible = ufs_sysfs_hid_is_visible,
+};
+
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
@@ -1777,6 +1969,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_string_descriptors_group,
&ufs_sysfs_flags_group,
&ufs_sysfs_attributes_group,
+ &ufs_sysfs_hid_group,
NULL,
};
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index caa32e23ffa5..584c2b5c6ad9 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -11,6 +11,7 @@
#include <ufs/ufs.h>
#include <linux/tracepoint.h>
+#include "ufs_trace_types.h"
#define str_opcode(opcode) \
__print_symbolic(opcode, \
diff --git a/drivers/ufs/core/ufs_trace_types.h b/drivers/ufs/core/ufs_trace_types.h
new file mode 100644
index 000000000000..f2d5ad1d92b9
--- /dev/null
+++ b/drivers/ufs/core/ufs_trace_types.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _UFS_TRACE_TYPES_H_
+#define _UFS_TRACE_TYPES_H_
+
+enum ufs_trace_str_t {
+ UFS_CMD_SEND,
+ UFS_CMD_COMP,
+ UFS_DEV_COMP,
+ UFS_QUERY_SEND,
+ UFS_QUERY_COMP,
+ UFS_QUERY_ERR,
+ UFS_TM_SEND,
+ UFS_TM_COMP,
+ UFS_TM_ERR
+};
+
+enum ufs_trace_tsf_t {
+ UFS_TSF_CDB,
+ UFS_TSF_OSF,
+ UFS_TSF_TM_INPUT,
+ UFS_TSF_TM_OUTPUT
+};
+
+#endif /* _UFS_TRACE_TYPES_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 50adfb8b335b..d9632d7c5f01 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -45,11 +45,6 @@
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
-#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
- UFSHCD_ERROR_MASK |\
- MCQ_CQ_EVENT_STATUS)
-
-
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
@@ -316,6 +311,9 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGLF2G9D8KBADG",
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
+ .model = "THGJFJT1E45BATP",
+ .quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT },
{}
};
@@ -364,6 +362,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val | intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val & ~intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
@@ -578,10 +604,12 @@ void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
lrbp = &hba->lrb[tag];
- dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
- dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
+ if (hba->monitor.enabled) {
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag,
+ div_u64(lrbp->issue_time_stamp_local_clock, 1000));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", tag,
+ div_u64(lrbp->compl_time_stamp_local_clock, 1000));
+ }
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -1017,6 +1045,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
*/
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
if (hba->pm_qos_enabled)
return;
@@ -1033,6 +1062,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba)
*/
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -1047,6 +1078,8 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
*/
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
+ guard(mutex)(&hba->pm_qos_mutex);
+
if (!hba->pm_qos_enabled)
return;
@@ -1275,7 +1308,7 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
-static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
u64 wait_timeout_us)
{
int ret = 0;
@@ -1403,7 +1436,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
+ ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
@@ -2202,11 +2235,13 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
- ktime_t curr_t = ktime_get();
+ ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ curr_t = ktime_get();
+
guard(spinlock_irqsave)(&hba->clk_scaling.lock);
if (!hba->clk_scaling.active_reqs++)
@@ -2326,10 +2361,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
- lrbp->issue_time_stamp = ktime_get();
- lrbp->issue_time_stamp_local_clock = local_clock();
- lrbp->compl_time_stamp = ktime_set(0, 0);
- lrbp->compl_time_stamp_local_clock = 0;
+ if (hba->monitor.enabled) {
+ lrbp->issue_time_stamp = ktime_get();
+ lrbp->issue_time_stamp_local_clock = local_clock();
+ lrbp->compl_time_stamp = ktime_set(0, 0);
+ lrbp->compl_time_stamp_local_clock = 0;
+ }
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
if (lrbp->cmd)
ufshcd_clk_scaling_start_busy(hba);
@@ -2566,7 +2603,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Return: 0 only if success.
+ * Return: 0 if successful; < 0 upon failure.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
@@ -2596,6 +2633,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ unsigned long flags;
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2605,6 +2643,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2682,32 +2724,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set |= intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set &= ~intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
* ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
* descriptor according to request
* @hba: per adapter instance
@@ -2826,8 +2842,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(ucd_req_ptr + 1, query->descriptor, len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -2840,8 +2854,6 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
.task_tag = lrbp->task_tag,
};
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -2867,6 +2879,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
ret = -EINVAL;
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
return ret;
}
@@ -3074,6 +3088,9 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
hba->dev_cmd.type = cmd_type;
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
@@ -3186,9 +3203,14 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
@@ -3280,6 +3302,10 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
ufshcd_release(hba);
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
@@ -3301,7 +3327,8 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@@ -3347,6 +3374,10 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
@@ -3378,7 +3409,8 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3446,8 +3478,9 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
-*/
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
@@ -3509,8 +3542,9 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Return: 0 for success, non-zero in case of failure.
-*/
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
@@ -3535,6 +3569,10 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
return ret;
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
@@ -3608,7 +3646,8 @@ out_unlock:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3638,7 +3677,8 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3818,7 +3858,7 @@ out:
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 in case of success; < 0 upon failure.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
@@ -4254,6 +4294,30 @@ out:
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
+ * ufshcd_dme_rmw - get modify set a DME attribute
+ * @hba: per adapter instance
+ * @mask: indicates which bits to clear from the value that has been read
+ * @val: actual value to write
+ * @attr: dme attribute
+ */
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+ u32 val, u32 attr)
+{
+ u32 cfg = 0;
+ int err;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+ if (err)
+ return err;
+
+ cfg &= ~mask;
+ cfg |= (val & mask);
+
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_rmw);
+
+/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
*
@@ -4275,7 +4339,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
- bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -4286,15 +4349,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
goto out_unlock;
}
hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- reenable_intr = true;
- }
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
@@ -4338,9 +4393,7 @@ out:
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- if (ret) {
+ if (ret && !hba->pm_op_in_progress) {
ufshcd_set_link_broken(hba);
ufshcd_schedule_eh_work(hba);
}
@@ -4348,6 +4401,14 @@ out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+ /*
+ * If the h8 exit fails during the runtime resume process, it becomes
+ * stuck and cannot be recovered through the error handler. To fix
+ * this, use link recovery instead of the error handler.
+ */
+ if (ret && hba->pm_op_in_progress)
+ ret = ufshcd_link_recovery(hba);
+
return ret;
}
@@ -4362,28 +4423,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
+ if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) ||
+ uic_cmd->command != UIC_CMD_DME_SET)
+ return ufshcd_send_uic_cmd(hba, uic_cmd);
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
ufshcd_hold(hba);
-
- if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
- uic_cmd->command == UIC_CMD_DME_SET) {
- ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
- goto out;
- }
-
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
- if (!ret)
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
- mutex_unlock(&hba->uic_cmd_mutex);
-
-out:
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
ufshcd_release(hba);
+
return ret;
}
@@ -4745,7 +4795,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
*
* Set fDeviceInit flag and poll until device toggles it.
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
@@ -4796,7 +4847,7 @@ out:
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
- * Return: 0 on success, non-zero value on failure.
+ * Return: 0 if successful; < 0 upon failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
@@ -5099,7 +5150,8 @@ out:
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
@@ -5523,9 +5575,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
irqreturn_t retval = IRQ_NONE;
struct uic_command *cmd;
- spin_lock(hba->host->host_lock);
+ guard(spinlock_irqsave)(hba->host->host_lock);
cmd = hba->active_uic_cmd;
- if (WARN_ON_ONCE(!cmd))
+ if (!cmd)
goto unlock;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
@@ -5550,8 +5602,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
unlock:
- spin_unlock(hba->host->host_lock);
-
return retval;
}
@@ -5581,8 +5631,10 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
- lrbp->compl_time_stamp = ktime_get();
- lrbp->compl_time_stamp_local_clock = local_clock();
+ if (hba->monitor.enabled) {
+ lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
+ }
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -5833,7 +5885,8 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
* as the device is allowed to manage its own way of handling background
* operations.
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
@@ -5872,7 +5925,8 @@ out:
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
@@ -6022,6 +6076,10 @@ out:
__func__, err);
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
{
struct utp_upiu_query_v4_0 *upiu_resp;
@@ -6410,13 +6468,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
-static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
spin_lock_irq(hba->host->host_lock);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
}
+EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery);
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
@@ -6884,7 +6943,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
- spin_lock(hba->host->host_lock);
+ guard(spinlock_irqsave)(hba->host->host_lock);
hba->errors |= UFSHCD_ERROR_MASK & intr_status;
if (hba->errors & INT_FATAL_ERRORS) {
@@ -6943,7 +7002,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
*/
hba->errors = 0;
hba->uic_error = 0;
- spin_unlock(hba->host->host_lock);
+
return retval;
}
@@ -7102,14 +7161,19 @@ static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
struct ufs_hba *hba = __hba;
+ u32 intr_status, enabled_intr_status;
/* Move interrupt handling to thread when MCQ & ESI are not enabled */
if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
return IRQ_WAKE_THREAD;
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
/* Directly handle interrupts since MCQ ESI handlers does the hard job */
- return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+ return ufshcd_sl_intr(hba, enabled_intr_status);
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
@@ -7413,7 +7477,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
@@ -8420,6 +8485,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+ dev_info->hid_sup = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) &
+ UFS_DEV_HID_SUPPORT;
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@@ -8729,7 +8798,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
struct utp_upiu_query_v4_0 *upiu_data;
- if (dev_info->wspecversion < 0x400)
+ if (dev_info->wspecversion < 0x400 ||
+ hba->dev_quirks & UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT)
return;
ufshcd_dev_man_lock(hba);
@@ -8856,16 +8926,11 @@ err:
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
- u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
- intrs = UFSHCD_ENABLE_MCQ_INTRS;
- if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
- intrs &= ~MCQ_CQ_EVENT_STATUS;
- ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
@@ -10476,8 +10541,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = devm_add_action_or_reset(dev, ufshcd_devres_release,
host);
if (err)
- return dev_err_probe(dev, err,
- "failed to add ufshcd dealloc action\n");
+ return err;
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
@@ -10700,6 +10764,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
mutex_init(&hba->ee_ctrl_mutex);
mutex_init(&hba->wb_mutex);
+
+ /* Initialize mutex for PM QoS request synchronization */
+ mutex_init(&hba->pm_qos_mutex);
+
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 3e545af536e5..70d195179eba 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -776,7 +776,7 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
u32 mask, sync_len;
enum {
SYNC_LEN_G1 = 80 * 1000, /* 80us */
- SYNC_LEN_G2 = 40 * 1000, /* 44us */
+ SYNC_LEN_G2 = 40 * 1000, /* 40us */
SYNC_LEN_G3 = 20 * 1000, /* 20us */
};
int i;
@@ -1110,8 +1110,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
- hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
- hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
@@ -1896,6 +1896,13 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
return 0;
}
+static int fsd_ufs_suspend(struct exynos_ufs *ufs)
+{
+ exynos_ufs_gate_clks(ufs);
+ hci_writel(ufs, 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs)
{
return (16 * 1000 * 1000000UL / ufs->mclk_rate);
@@ -2162,6 +2169,7 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.pre_link = fsd_ufs_pre_link,
.post_link = fsd_ufs_post_link,
.pre_pwr_change = fsd_ufs_pre_pwr_change,
+ .suspend = fsd_ufs_suspend,
};
static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 182f58d0c9db..758a393a9de1 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -29,6 +29,7 @@
#include "ufs-mediatek-sip.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
@@ -50,6 +51,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
+ { .compatible = "mediatek,mt8195-ufshci" },
{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
@@ -96,49 +98,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+ return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+ return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+ return host->caps & UFS_MTK_CAP_BROKEN_VCC;
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+ return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
}
static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+ return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
}
static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+ return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
}
static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+ return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
+}
+
+static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+
+ return mclk->ufs_sel_clki &&
+ mclk->ufs_sel_max_clki &&
+ mclk->ufs_sel_min_clki;
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
@@ -267,6 +279,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+
+ /* DDR_EN setting */
+ if (host->ip_ver >= IP_VER_MT6989) {
+ ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
+ 0x453000, REG_UFS_MMIO_OPT_CTRL_0);
+ }
+
}
return 0;
@@ -344,7 +363,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+ /*
+ * If clock on timeout, assume clock is off, notify tfa do clock
+ * off setting.(keep DIFN disable, release resource)
+ * If clock off timeout, assume clock will off finally,
+ * set ref_clk_enabled directly.(keep DIFN disable, keep resource)
+ */
+ if (on)
+ ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
+ else
+ host->ref_clk_enabled = false;
return -ETIMEDOUT;
@@ -388,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
}
}
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
@@ -424,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
break;
} while (time_checked < timeout);
- if (wait_idle && sm != VS_HCE_BASE)
+ if (wait_idle && sm != VS_HCE_BASE) {
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@@ -663,6 +695,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
+ host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -768,8 +803,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
clk_pwr_off = true;
}
- if (clk_pwr_off)
+ if (clk_pwr_off) {
ufs_mtk_pwr_ctrl(hba, false);
+ } else {
+ dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
+ hba->ahit,
+ ufshcd_readl(hba,
+ REG_AUTO_HIBERNATE_IDLE_TIMER));
+ }
ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
@@ -779,6 +820,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
return ret;
}
+static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
+ struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
+ unsigned int nr = map->nr_queues;
+ unsigned int q_index;
+
+ q_index = map->mq_map[cpu];
+ if (q_index >= nr) {
+ dev_err(hba->dev, "hwq index %d exceed %d\n",
+ q_index, nr);
+ return MTK_MCQ_INVALID_IRQ;
+ }
+
+ return host->mcq_intr_info[q_index].irq;
+}
+
+static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
+{
+ unsigned int irq, _cpu;
+ int ret;
+
+ irq = ufs_mtk_mcq_get_irq(hba, cpu);
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
+ return;
+ }
+
+ /* force migrate irq of cpu0 to cpu3 */
+ _cpu = (cpu == 0) ? 3 : cpu;
+ ret = irq_set_affinity(irq, cpumask_of(_cpu));
+ if (ret) {
+ dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
+ irq, _cpu);
+ return;
+ }
+ dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
+}
+
+static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
+{
+ bool is_legacy = false;
+
+ switch (hw_ip_ver) {
+ case IP_LEGACY_VER_MT6893:
+ case IP_LEGACY_VER_MT6781:
+ /* can add other legacy chipset ID here accordingly */
+ is_legacy = true;
+ break;
+ default:
+ break;
+ }
+ dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
+
+ return is_legacy;
+}
+
+/*
+ * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since
+ * project MT6878. In order to perform correct version comparison,
+ * version number is changed by SW for the following projects.
+ * IP_VER_MT6983 0x00360000 to 0x10360000
+ * IP_VER_MT6897 0x01440000 to 0x10440000
+ * IP_VER_MT6989 0x01450000 to 0x10450000
+ * IP_VER_MT6991 0x01460000 to 0x10460000
+ */
+static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 hw_ip_ver;
+
+ hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+ if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
+ ((hw_ip_ver & (0xFF << 24)) == 0)) {
+ hw_ip_ver &= ~(0xFF << 24);
+ hw_ip_ver |= (0x1 << 28);
+ }
+
+ host->ip_ver = hw_ip_ver;
+
+ host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
+}
+
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -818,8 +944,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
- struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
@@ -837,15 +965,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde")) {
+ host->mclk.ufs_fde_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_fde_max_src")) {
+ host->mclk.ufs_fde_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde_min_src")) {
+ host->mclk.ufs_fde_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
}
}
- if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
- !mclk->ufs_sel_min_clki) {
+ list_for_each_entry(clki, head, list) {
+ dev_info(hba->dev, "clk \"%s\" present", clki->name);
+ }
+
+ if (!ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
+ return;
+ }
+
+ /*
+ * Default get vcore if dts have these settings.
+ * No matter clock scaling support or not. (may disable by customer)
+ */
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ return;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get clk-scale-up-vcore-min");
+ return;
+ }
+
+ host->mclk.reg_vcore = reg;
+ host->mclk.vcore_volt = volt;
+
+ /* If default boot is max gear, request vcore */
+ if (reg && volt && host->clk_scale_up) {
+ if (regulator_set_voltage(reg, volt, INT_MAX)) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ }
}
}
@@ -859,7 +1029,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
struct arm_smccc_res res;
int err, ver;
- if (hba->vreg_info.vcc)
+ if (info->vcc)
return 0;
if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
@@ -916,6 +1086,80 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ u32 ah_ms = 10;
+ u32 ah_scale, ah_timer;
+ u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
+
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
+ ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
+ hba->ahit);
+ ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+ hba->ahit);
+ if (ah_scale <= 5)
+ ah_ms = ah_timer * scale_us[ah_scale] / 1000;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.delay_ms = max(ah_ms, 10U);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+}
+
+/* Convert microseconds to Auto-Hibernate Idle Timer register value */
+static u32 ufs_mtk_us_to_ahit(unsigned int timer)
+{
+ unsigned int scale;
+
+ for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
+ timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
+
+ return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
+}
+
+static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
+{
+ unsigned int us;
+
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ switch (hba->dev_info.wmanufacturerid) {
+ case UFS_VENDOR_SAMSUNG:
+ /* configure auto-hibern8 timer to 3.5 ms */
+ us = 3500;
+ break;
+
+ case UFS_VENDOR_MICRON:
+ /* configure auto-hibern8 timer to 2 ms */
+ us = 2000;
+ break;
+
+ default:
+ /* configure auto-hibern8 timer to 1 ms */
+ us = 1000;
+ break;
+ }
+
+ hba->ahit = ufs_mtk_us_to_ahit(us);
+ }
+
+ ufs_mtk_setup_clk_gating(hba);
+}
+
+static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
+{
+ /* UFS version is below 4.0, clock scaling is not necessary */
+ if ((hba->dev_info.wspecversion < 0x0400) &&
+ ufs_mtk_is_clk_scale_ready(hba)) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+
+ _ufs_mtk_clk_scale(hba, false);
+ }
+}
+
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -1014,13 +1258,17 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ host->clk_scale_up = true; /* default is max freq */
/* Set runtime pm delay to replace default */
shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
- hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+ if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
+ hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
@@ -1050,7 +1298,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
- host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ ufs_mtk_get_hw_ip_version(hba);
goto out;
@@ -1077,6 +1325,10 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
dev_req_params->gear_rx < UFS_HS_G4)
return false;
+ if (dev_req_params->pwr_tx == SLOW_MODE ||
+ dev_req_params->pwr_rx == SLOW_MODE)
+ return false;
+
return true;
}
@@ -1092,6 +1344,10 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
host_params.hs_rx_gear = UFS_HS_G5;
host_params.hs_tx_gear = UFS_HS_G5;
+ if (dev_max_params->pwr_rx == SLOW_MODE ||
+ dev_max_params->pwr_tx == SLOW_MODE)
+ host_params.desired_working_mode = UFS_PWM_MODE;
+
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
@@ -1115,6 +1371,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_NO_ADAPT);
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+ }
+
ret = ufshcd_uic_change_pwr_mode(hba,
FASTAUTO_MODE << 4 | FASTAUTO_MODE);
@@ -1124,10 +1402,59 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
}
- if (host->hw_ver.major >= 3) {
+ /* if already configured to the requested pwr_mode, skip adapt */
+ if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
+ dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
+ dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
+ dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
+ dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
+ dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
+ dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
+ return ret;
+ }
+
+ if (dev_req_params->pwr_rx == FAST_MODE ||
+ dev_req_params->pwr_rx == FASTAUTO_MODE) {
+ if (host->hw_ver.major >= 3) {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ } else {
+ ret = ufshcd_dme_configure_adapt(hba,
+ dev_req_params->gear_tx,
+ PA_NO_ADAPT);
+ }
+ } else {
ret = ufshcd_dme_configure_adapt(hba,
- dev_req_params->gear_tx,
- PA_INITIAL_ADAPT);
+ dev_req_params->gear_tx,
+ PA_NO_ADAPT);
+ }
+
+ return ret;
+}
+
+static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* disable auto-hibern8 */
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ /* wait host return to idle state when auto-hibern8 off */
+ ret = ufs_mtk_wait_idle_state(hba, 5);
+ if (ret)
+ goto out;
+
+ ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+
+out:
+ if (ret) {
+ dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+
+ ufshcd_force_error_recovery(hba);
+
+ /* trigger error handler and break suspend */
+ ret = -EBUSY;
}
return ret;
@@ -1139,13 +1466,20 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
+ static u32 reg;
switch (stage) {
case PRE_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ ufs_mtk_auto_hibern8_disable(hba);
+ }
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
break;
default:
ret = -EINVAL;
@@ -1179,6 +1513,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ufs_mtk_get_controller_version(hba);
@@ -1204,34 +1539,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+ /* Enable the 1144 functions setting */
+ if (host->ip_ver == IP_VER_MT6989) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ if (ret)
+ return ret;
+
+ tmp |= 0x10;
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
+ }
+
return ret;
}
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+static void ufs_mtk_post_link(struct ufs_hba *hba)
{
- u32 ah_ms;
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 tmp;
- if (ufshcd_is_clkgating_allowed(hba)) {
- if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
- ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
- hba->ahit);
- else
- ah_ms = 10;
- ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+ /* fix device PA_INIT no adapt */
+ if (host->ip_ver >= IP_VER_MT6899) {
+ ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+ tmp |= 0x100;
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
}
-}
-static void ufs_mtk_post_link(struct ufs_hba *hba)
-{
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
-
- /* will be configured during probe hba */
- if (ufshcd_is_auto_hibern8_supported(hba))
- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
-
- ufs_mtk_setup_clk_gating(hba);
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
@@ -1258,11 +1592,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
- /* disable hba before device reset */
- ufshcd_hba_stop(hba);
-
ufs_mtk_device_reset_ctrl(0, res);
+ /* disable hba in middle of device reset */
+ ufshcd_hba_stop(hba);
+
/*
* The reset signal is active low. UFS devices shall detect
* more than or equal to 1us of positive or negative RST_n
@@ -1299,7 +1633,11 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
return err;
/* Check link state to make sure exit h8 success */
- ufs_mtk_wait_idle_state(hba, 5);
+ err = ufs_mtk_wait_idle_state(hba, 5);
+ if (err) {
+ dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
+ return err;
+ }
err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (err) {
dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@@ -1344,6 +1682,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
struct ufs_vreg *vccqx = NULL;
+ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+ return;
+
if (hba->vreg_info.vccq)
vccqx = hba->vreg_info.vccq;
else
@@ -1398,21 +1739,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
}
}
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
- int ret;
-
- /* disable auto-hibern8 */
- ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
- /* wait host return to idle state when auto-hibern8 off */
- ufs_mtk_wait_idle_state(hba, 5);
-
- ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
- if (ret)
- dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1421,7 +1747,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
- ufs_mtk_auto_hibern8_disable(hba);
+ return ufs_mtk_auto_hibern8_disable(hba);
return 0;
}
@@ -1479,8 +1805,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
return 0;
+
fail:
- return ufshcd_link_recovery(hba);
+ /*
+ * Check if the platform (parent) device has resumed, and ensure that
+ * power, clock, and MTCMOS are all turned on.
+ */
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
+ hba->dev->power.request,
+ hba->dev->power.runtime_status,
+ hba->dev->power.runtime_error);
+ }
+
+ return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -1505,6 +1844,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
+ unsigned int cpu;
+
+ if (hba->mcq_enabled) {
+ /* Iterate all cpus to set affinity for mcq irqs */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ ufs_mtk_mcq_set_irq_affinity(hba, cpu);
+ }
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
@@ -1556,6 +1902,8 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
ufs_mtk_vreg_fix_vcc(hba);
ufs_mtk_vreg_fix_vccqx(hba);
+ ufs_mtk_fix_ahit(hba);
+ ufs_mtk_fix_clock_scaling(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -1598,24 +1946,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
hba->vps->ondemand_data.downdifferential = 20;
}
-/**
- * ufs_mtk_clk_scale - Internal clk scaling operation
- *
- * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
- * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
- * Max and min clocks rate of ufs_sel defined in dts should match rate of
- * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
- * This prevent changing rate of pll clock that is shared between modules.
- *
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- */
-static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki = mclk->ufs_sel_clki;
- int ret = 0;
+ struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
+ struct regulator *reg;
+ int volt, ret = 0;
+ bool clk_bind_vcore = false;
+ bool clk_fde_scale = false;
+
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ if (!clki || !fde_clki)
+ return;
+
+ reg = host->mclk.reg_vcore;
+ volt = host->mclk.vcore_volt;
+ if (reg && volt != 0)
+ clk_bind_vcore = true;
+
+ if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
+ clk_fde_scale = true;
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -1624,21 +1978,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
return;
}
+ if (clk_fde_scale) {
+ ret = clk_prepare_enable(fde_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "fde clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+ }
+
if (scale_up) {
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
- clki->curr_freq = clki->max_freq;
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ }
+
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_max_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ }
+ }
} else {
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_min_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
- clki->curr_freq = clki->min_freq;
- }
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
- if (ret) {
- dev_info(hba->dev,
- "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
}
+out:
clk_disable_unprepare(clki->clk);
+ if (clk_fde_scale)
+ clk_disable_unprepare(fde_clki->clk);
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clk scaling by switching the parent of ufs_sel (mux).
+ * ufs_sel feeds ufs_ck, which drives the UFS hardware directly.
+ * The max and min clock rates of ufs_sel defined in the DT should match the
+ * rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
+ * This prevents changing the rate of the PLL clock that is shared between modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+
+ if (host->clk_scale_up == scale_up)
+ goto out;
+
+ _ufs_mtk_clk_scale(hba, scale_up);
+
+ host->clk_scale_up = scale_up;
+
+ /* curr_freq must always be updated before clk_set_rate() */
+ if (scale_up)
+ clki->curr_freq = clki->max_freq;
+ else
+ clki->curr_freq = clki->min_freq;
+out:
trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
@@ -1748,6 +2190,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
return ret;
}
}
+ host->is_mcq_intr_enabled = true;
return 0;
}
@@ -1831,10 +2274,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
- struct device *dev = &pdev->dev;
- struct device_node *reset_node;
- struct platform_device *reset_pdev;
+ struct device *dev = &pdev->dev, *phy_dev = NULL;
+ struct device_node *reset_node, *phy_node = NULL;
+ struct platform_device *reset_pdev, *phy_pdev = NULL;
struct device_link *link;
+ struct ufs_hba *hba;
+ struct ufs_mtk_host *host;
reset_node = of_find_compatible_node(NULL, NULL,
"ti,syscon-reset");
@@ -1861,13 +2306,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
}
skip_reset:
+ /* find phy node */
+ phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+
+ if (phy_node) {
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (!phy_pdev)
+ goto skip_phy;
+ phy_dev = &phy_pdev->dev;
+
+ pm_runtime_set_active(phy_dev);
+ pm_runtime_enable(phy_dev);
+ pm_runtime_get_sync(phy_dev);
+
+ put_device(phy_dev);
+ dev_info(dev, "phys node found\n");
+ } else {
+ dev_notice(dev, "phys node not found\n");
+ }
+
+skip_phy:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
- if (err)
+ if (err) {
dev_err(dev, "probe failed %d\n", err);
+ goto out;
+ }
+
+ hba = platform_get_drvdata(pdev);
+ if (!hba)
+ goto out;
+
+ if (phy_node && phy_dev) {
+ host = ufshcd_get_variant(hba);
+ host->phy_dev = phy_dev;
+ }
+
+ /*
+ * Because the default power setting of VSx (the upper layer of
+ * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
+ * entering LPM.
+ */
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+out:
+ of_node_put(phy_node);
of_node_put(reset_node);
return err;
}
@@ -1892,27 +2375,38 @@ static int ufs_mtk_system_suspend(struct device *dev)
ret = ufshcd_system_suspend(dev);
if (ret)
- return ret;
+ goto out;
+
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
ufs_mtk_dev_vreg_set_lpm(hba, true);
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
- return 0;
+out:
+ return ret;
}
static int ufs_mtk_system_resume(struct device *dev)
{
+ int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
struct arm_smccc_res res;
- ufs_mtk_dev_vreg_set_lpm(hba, false);
+ if (pm_runtime_suspended(hba->dev))
+ goto out;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
- return ufshcd_system_resume(dev);
+ ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+ ret = ufshcd_system_resume(dev);
+
+ return ret;
}
#endif
@@ -1920,6 +2414,7 @@ static int ufs_mtk_system_resume(struct device *dev)
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
int ret = 0;
@@ -1932,17 +2427,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
+ if (host->phy_dev)
+ pm_runtime_put_sync(host->phy_dev);
+
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
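+ /* Balance the pm_runtime_put_sync() done in ufs_mtk_runtime_suspend() */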
+ if (host->phy_dev)
+ pm_runtime_get_sync(host->phy_dev);
+
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_runtime_resume(dev);
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 05d76a6bd772..dfbf78bd8664 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -133,6 +133,8 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_DISABLE_MCQ = 1 << 8,
/* Control MTCMOS with RTFF */
UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9,
+
+ UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10,
};
struct ufs_mtk_crypt_cfg {
@@ -147,6 +149,11 @@ struct ufs_mtk_clk {
struct ufs_clk_info *ufs_sel_clki; /* Mux */
struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+ struct ufs_clk_info *ufs_fde_clki; /* Mux */
+ struct ufs_clk_info *ufs_fde_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_fde_min_clki; /* Min src */
+ struct regulator *reg_vcore;
+ int vcore_volt;
};
struct ufs_mtk_hw_ver {
@@ -176,14 +183,17 @@ struct ufs_mtk_host {
bool mphy_powered_on;
bool unipro_lpm;
bool ref_clk_enabled;
+ bool clk_scale_up;
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
+ bool legacy_ip_ver;
bool mcq_set_intr;
bool is_mcq_intr_enabled;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
+ struct device *phy_dev;
};
/* MTK delay of autosuspend: 500 ms */
@@ -192,4 +202,27 @@ struct ufs_mtk_host {
/* MTK RTT support number */
#define MTK_MAX_NUM_RTT 2
+/* UFSHCI MTK ip version value */
+enum {
+ /* UFSHCI 3.1 */
+ IP_VER_MT6983 = 0x10360000,
+ IP_VER_MT6878 = 0x10420200,
+
+ /* UFSHCI 4.0 */
+ IP_VER_MT6897 = 0x10440000,
+ IP_VER_MT6989 = 0x10450000,
+ IP_VER_MT6899 = 0x10450100,
+ IP_VER_MT6991_A0 = 0x10460000,
+ IP_VER_MT6991_B0 = 0x10470000,
+ IP_VER_MT6993 = 0x10480000,
+
+ IP_VER_NONE = 0xFFFFFFFF
+};
+
+enum ip_ver_legacy {
+ IP_LEGACY_VER_MT6781 = 0x10380000,
+ IP_LEGACY_VER_MT6879 = 0x10360000,
+ IP_LEGACY_VER_MT6893 = 0x20160706
+};
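As a rough illustration of how these codes partition (an assumption drawn only from the values listed above, not from driver code in this hunk): the non-legacy UFSHCI 4.0 parts all sort numerically above IP_VER_MT6897, while the legacy MT6893 code does not follow that ordering, which is presumably why struct ufs_mtk_host carries a separate legacy_ip_ver flag. A hypothetical helper, not part of the driver:

	/* Illustrative only: classify the controller generation from ip_ver. */
	static inline bool ufs_mtk_is_ufshci_4_0(struct ufs_mtk_host *host)
	{
		return !host->legacy_ip_ver &&
		       host->ip_ver != IP_VER_NONE &&
		       host->ip_ver >= IP_VER_MT6897;
	}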
+
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 18a978452001..3e83dc51d538 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -38,6 +38,9 @@
#define DEEMPHASIS_3_5_dB 0x04
#define NO_DEEMPHASIS 0x0
+#define UFS_ICE_SYNC_RST_SEL BIT(3)
+#define UFS_ICE_SYNC_RST_SW BIT(4)
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -494,12 +497,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
* If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
* so that the subsequent power mode change shall stick to Rate-A.
*/
- if (host->hw_ver.major == 0x5) {
- if (host->phy_gear == UFS_HS_G5)
- host_params->hs_rate = PA_HS_MODE_A;
- else
- host_params->hs_rate = PA_HS_MODE_B;
- }
+ if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5)
+ host_params->hs_rate = PA_HS_MODE_A;
mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
@@ -532,6 +531,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
goto out_disable_phy;
}
+ ret = phy_calibrate(phy);
+ if (ret) {
+ dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret);
+ goto out_disable_phy;
+ }
+
ufs_qcom_select_unipro_mode(host);
return 0;
@@ -552,11 +557,32 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
+ int err;
+
+ /* Enable UTP internal clock gating */
ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
ufshcd_readl(hba, REG_UFS_CFG2);
+
+ /* Enable Unipro internal clock gating */
+ err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+ DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+ PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL);
+out:
+ if (err)
+ dev_err(hba->dev, "hw clk gating enabled failed\n");
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -705,26 +731,17 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
if (status == PRE_CHANGE)
return 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
+ if (!ufs_qcom_is_link_active(hba))
ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
+ /* reset the connected UFS device during power down */
+ if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ ufs_qcom_device_reset_ctrl(hba, true);
return ufs_qcom_ice_suspend(host);
}
@@ -732,25 +749,28 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int err;
+ u32 reg_val;
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
+ if ((!ufs_qcom_is_link_active(hba)) &&
+ host->hw_ver.major == 5 &&
+ host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG);
+ reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+ reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);
+ /*
+ * HW documentation doesn't recommend any delay between the
+ * reset set and clear. But we are enforcing an arbitrary delay
+ * to give flops enough time to settle in.
+ */
+ usleep_range(50, 100);
+ ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG);
+ ufshcd_readl(hba, UFS_MEM_ICE_CFG);
}
return ufs_qcom_ice_resume(host);
@@ -1093,6 +1113,18 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
}
}
+static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
+ u32 hs_gear_old = host_params->hs_tx_gear;
+
+ ufshcd_parse_gear_limits(hba, host_params);
+ if (host_params->hs_tx_gear != hs_gear_old)
+ host->phy_gear = host_params->hs_tx_gear;
+}
+
static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1130,12 +1162,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
+ * Some clocks come from the PHY, so they need to be managed together
+ * with the controller clocks, which also gives better power savings.
+ * Hence keep the phy_power_off/on calls in ufs_qcom_setup_clocks(),
+ * so that the PHY's regulators and clocks are turned on/off along
+ * with the UFS clocks.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy;
+ int err;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1145,23 +1185,47 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
+ phy = host->generic_phy;
+
switch (status) {
case PRE_CHANGE:
if (on) {
ufs_qcom_icc_update_bw(host);
+ if (ufs_qcom_is_link_hibern8(hba)) {
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err) {
+ dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err);
+ return err;
+ }
+ }
} else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
+
+ err = phy_power_off(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power off failed, ret=%d\n", err);
+ return err;
+ }
}
break;
case POST_CHANGE:
if (on) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power on failed, ret = %d\n", err);
+ return err;
+ }
+
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
} else {
+ if (ufs_qcom_is_link_hibern8(hba))
+ ufs_qcom_disable_lane_clks(host);
+
ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
@@ -1312,6 +1376,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_advertise_quirks(hba);
ufs_qcom_set_host_params(hba);
ufs_qcom_set_phy_gear(host);
+ ufs_qcom_parse_gear_limits(hba);
err = ufs_qcom_ice_init(host);
if (err)
@@ -1717,7 +1782,7 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
}
static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix, enum ufshcd_res id)
+ const char *prefix, void __iomem *base)
{
u32 *regs __free(kfree) = NULL;
size_t pos;
@@ -1730,7 +1795,7 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
return -ENOMEM;
for (pos = 0; pos < len; pos += 4)
- regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+ regs[pos / 4] = readl(base + offset + pos);
print_hex_dump(KERN_ERR, prefix,
len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
@@ -1741,30 +1806,34 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0];
+ void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE;
+
struct dump_info {
+ void __iomem *base;
size_t offset;
size_t len;
const char *prefix;
- enum ufshcd_res id;
};
struct dump_info mcq_dumps[] = {
- {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
- {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
- {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
- {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
- {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
- {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
- {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
- {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
- {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
- {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
- {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ {hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "},
+ {hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "},
+ {mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "},
+ {opr->base, 0x0, 256 * 4, "MCQ SQD-0 "},
+ {opr->base, 0x400, 256 * 4, "MCQ SQD-1 "},
+ {opr->base, 0x800, 256 * 4, "MCQ SQD-2 "},
+ {opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "},
+ {opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "},
+ {opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "},
+ {opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "},
+ {opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "},
};
for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
- mcq_dumps[i].prefix, mcq_dumps[i].id);
+ mcq_dumps[i].prefix, mcq_dumps[i].base);
cond_resched();
}
}
@@ -1873,7 +1942,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
return 0;
}
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *d)
@@ -1885,124 +1953,69 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
-
-/* Resources */
-static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
- {.name = "ufs_mem",},
- {.name = "mcq",},
- /* Submission Queue DAO */
- {.name = "mcq_sqd",},
- /* Submission Queue Interrupt Status */
- {.name = "mcq_sqis",},
- /* Completion Queue DAO */
- {.name = "mcq_cqd",},
- /* Completion Queue Interrupt Status */
- {.name = "mcq_cqis",},
- /* MCQ vendor specific */
- {.name = "mcq_vs",},
-};
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
struct platform_device *pdev = to_platform_device(hba->dev);
- struct ufshcd_res_info *res;
- struct resource *res_mem, *res_mcq;
- int i, ret;
-
- memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
-
- for (i = 0; i < RES_MAX; i++) {
- res = &hba->res[i];
- res->resource = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- res->name);
- if (!res->resource) {
- dev_info(hba->dev, "Resource %s not provided\n", res->name);
- if (i == RES_UFS)
- return -ENODEV;
- continue;
- } else if (i == RES_UFS) {
- res_mem = res->resource;
- res->base = hba->mmio_base;
- continue;
- }
+ struct resource *res;
- res->base = devm_ioremap_resource(hba->dev, res->resource);
- if (IS_ERR(res->base)) {
- dev_err(hba->dev, "Failed to map res %s, err=%d\n",
- res->name, (int)PTR_ERR(res->base));
- ret = PTR_ERR(res->base);
- res->base = NULL;
- return ret;
- }
+ /* Map the MCQ configuration region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq");
+ if (!res) {
+ dev_err(hba->dev, "MCQ resource not found in device tree\n");
+ return -ENODEV;
}
- /* MCQ resource provided in DT */
- res = &hba->res[RES_MCQ];
- /* Bail if MCQ resource is provided */
- if (res->base)
- goto out;
-
- /* Explicitly allocate MCQ resource from ufs_mem */
- res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
- if (!res_mcq)
- return -ENOMEM;
-
- res_mcq->start = res_mem->start +
- MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
- res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
- res_mcq->flags = res_mem->flags;
- res_mcq->name = "mcq";
-
- ret = insert_resource(&iomem_resource, res_mcq);
- if (ret) {
- dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
- ret);
- return ret;
+ hba->mcq_base = devm_ioremap_resource(hba->dev, res);
+ if (IS_ERR(hba->mcq_base)) {
+ dev_err(hba->dev, "Failed to map MCQ region: %ld\n",
+ PTR_ERR(hba->mcq_base));
+ return PTR_ERR(hba->mcq_base);
}
- res->base = devm_ioremap_resource(hba->dev, res_mcq);
- if (IS_ERR(res->base)) {
- dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
- (int)PTR_ERR(res->base));
- ret = PTR_ERR(res->base);
- goto ioremap_err;
- }
-
-out:
- hba->mcq_base = res->base;
return 0;
-ioremap_err:
- res->base = NULL;
- remove_resource(res_mcq);
- return ret;
}
static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
- struct ufshcd_res_info *mem_res, *sqdao_res;
struct ufshcd_mcq_opr_info_t *opr;
int i;
+ u32 doorbell_offsets[OPR_MAX];
- mem_res = &hba->res[RES_UFS];
- sqdao_res = &hba->res[RES_MCQ_SQD];
+ /*
+ * Configure doorbell address offsets in MCQ configuration registers.
+ * These values are offsets relative to mmio_base (UFS_HCI_BASE).
+ *
+ * Memory Layout:
+ * - mmio_base = UFS_HCI_BASE
+ * - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)
+ * - Doorbell registers are at:
+ *     mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) + UFS_QCOM_MCQ_SQD_OFFSET,
+ *     which is also mcq_base + UFS_QCOM_MCQ_SQD_OFFSET
+ */
- if (!mem_res->base || !sqdao_res->base)
- return -EINVAL;
+ doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET;
+ doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET;
+ doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET;
+ doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET;
+ /*
+ * Configure MCQ operation registers.
+ *
+ * The doorbell registers are physically located within the MCQ region:
+ * - doorbell_physical_addr = mmio_base + doorbell_offset
+ * - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+ */
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
- opr->offset = sqdao_res->resource->start -
- mem_res->resource->start + 0x40 * i;
- opr->stride = 0x100;
- opr->base = sqdao_res->base + 0x40 * i;
+ opr->offset = doorbell_offsets[i]; /* Offset relative to mmio_base */
+ opr->stride = UFS_QCOM_MCQ_STRIDE; /* 256 bytes between queues */
+
+ /*
+ * Calculate the actual doorbell base address within MCQ region:
+ * base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+ */
+ opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET);
}
return 0;
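For reference, the doorbell arithmetic above works out as follows with the macro values introduced in ufs-qcom.h later in this diff (a worked example, not additional driver code):

	/*
	 * UFS_QCOM_MCQ_CONFIG_OFFSET = 224 * 0x200        = 0x1C000
	 * UFS_QCOM_SQD_ADDR_OFFSET   = 0x1C000 + 0x5000   = 0x21000
	 * opr[OPR_SQD].offset        = 0x21000  (relative to mmio_base)
	 * opr[OPR_SQD].base          = mcq_base + (0x21000 - 0x1C000)
	 *                            = mcq_base + 0x5000
	 * Queue n's SQ doorbell then sits at opr->base + n * 0x100.
	 */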
@@ -2017,12 +2030,8 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
unsigned long *ocqs)
{
- struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
-
- if (!mcq_vs_res->base)
- return -EINVAL;
-
- *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+ /* Read from MCQ vendor-specific register in MCQ region */
+ *ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS);
return 0;
}
@@ -2053,17 +2062,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
-{
- for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
- devm_free_irq(q->hba->dev, q->irq, q->hba);
-
- platform_device_msi_free_irqs_all(uqi->hba->dev);
- devm_kfree(uqi->hba->dev, uqi);
-}
-
-DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
-
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -2078,18 +2076,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
- struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
- devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
- if (!qi)
- return -ENOMEM;
- /* Preset so __free() has a pointer to hba in all error paths */
- qi[0].hba = hba;
-
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
- dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- return ret;
+ dev_warn(hba->dev, "Platform MSI not supported or failed (%d), continuing without ESI\n", ret);
+ return ret; /* Caller continues without ESI */
+ }
+
+ struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+
+ if (!qi) {
+ platform_device_msi_free_irqs_all(hba->dev);
+ return -ENOMEM;
}
for (int idx = 0; idx < nr_irqs; idx++) {
@@ -2100,17 +2098,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
- dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
+ dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n",
__func__, qi[idx].irq, ret);
- qi[idx].irq = 0;
+ /* Free previously allocated IRQs */
+ for (int j = 0; j < idx; j++)
+ devm_free_irq(hba->dev, qi[j].irq, qi + j);
+ platform_device_msi_free_irqs_all(hba->dev);
+ devm_kfree(hba->dev, qi);
return ret;
}
}
- retain_and_null_ptr(qi);
-
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
+ if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
}
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 0a5cfc2dd4f7..380d02333d38 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -24,6 +24,37 @@
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
+/* bit and mask definitions for PA_VS_CLK_CFG_REG attribute */
+#define PA_VS_CLK_CFG_REG 0x9004
+#define PA_VS_CLK_CFG_REG_MASK GENMASK(8, 0)
+
+/* bit and mask definitions for DL_VS_CLK_CFG attribute */
+#define DL_VS_CLK_CFG 0xA00B
+#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
+
+/* Qualcomm MCQ Configuration */
+#define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 in hex */
+#define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */
+
+/* Doorbell offsets within MCQ region (relative to MCQ_CONFIG_BASE) */
+#define UFS_QCOM_MCQ_SQD_OFFSET 0x5000
+#define UFS_QCOM_MCQ_CQD_OFFSET 0x5080
+#define UFS_QCOM_MCQ_SQIS_OFFSET 0x5040
+#define UFS_QCOM_MCQ_CQIS_OFFSET 0x50C0
+#define UFS_QCOM_MCQ_STRIDE 0x100
+
+/* Calculated doorbell address offsets (relative to mmio_base) */
+#define UFS_QCOM_SQD_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQD_OFFSET)
+#define UFS_QCOM_CQD_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQD_OFFSET)
+#define UFS_QCOM_SQIS_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQIS_OFFSET)
+#define UFS_QCOM_CQIS_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQIS_OFFSET)
+#define REG_UFS_MCQ_STRIDE UFS_QCOM_MCQ_STRIDE
+
+/* MCQ Vendor specific address offsets (relative to MCQ_CONFIG_BASE) */
+#define UFS_MEM_VS_BASE 0x4000
+#define UFS_MEM_CQIS_VS 0x4008
+
/* QCOM UFS host controller vendor specific registers */
enum {
REG_UFS_SYS1CLK_1US = 0xC0,
@@ -51,7 +82,7 @@ enum {
UFS_AH8_CFG = 0xFC,
UFS_RD_REG_MCQ = 0xD00,
-
+ UFS_MEM_ICE_CFG = 0x2600,
REG_UFS_MEM_ICE_CONFIG = 0x260C,
REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
@@ -86,10 +117,6 @@ enum {
REG_UFS_SW_H8_EXIT_CNT = 0x2710,
};
-enum {
- UFS_MEM_CQIS_VS = 0x8,
-};
-
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 996387906aa1..b87e03777395 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -22,17 +22,12 @@
#define MAX_SUPP_MAC 64
-struct ufs_host {
- void (*late_init)(struct ufs_hba *hba);
-};
-
enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
struct intel_host {
- struct ufs_host ufs_host;
u32 dsm_fns;
u32 active_ltr;
u32 idle_ltr;
@@ -408,8 +403,14 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
-static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
+ int err;
+
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ err = ufs_intel_common_init(hba);
/* LKF always needs a full reset, so set PM accordingly */
if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
hba->spm_lvl = UFS_PM_LVL_6;
@@ -418,19 +419,6 @@ static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
hba->spm_lvl = UFS_PM_LVL_5;
hba->rpm_lvl = UFS_PM_LVL_5;
}
-}
-
-static int ufs_intel_lkf_init(struct ufs_hba *hba)
-{
- struct ufs_host *ufs_host;
- int err;
-
- hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- hba->caps |= UFSHCD_CAP_CRYPTO;
- err = ufs_intel_common_init(hba);
- ufs_host = ufshcd_get_variant(hba);
- ufs_host->late_init = ufs_intel_lkf_late_init;
return err;
}
@@ -444,6 +432,8 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
+ hba->rpm_lvl = UFS_PM_LVL_2;
+ hba->spm_lvl = UFS_PM_LVL_2;
hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
@@ -574,7 +564,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct ufs_host *ufs_host;
struct ufs_hba *hba;
void __iomem *mmio_base;
int err;
@@ -607,10 +596,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- ufs_host = ufshcd_get_variant(hba);
- if (ufs_host && ufs_host->late_init)
- ufs_host->late_init(hba);
-
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
@@ -645,6 +630,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index ffe5d1d2b215..c2dafb583cf5 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -430,6 +430,39 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
}
EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);
+/**
+ * ufshcd_parse_gear_limits - Parse DT-based gear and rate limits for UFS
+ * @hba: Pointer to UFS host bus adapter instance
+ * @host_params: Pointer to UFS host parameters structure to be updated
+ *
+ * This function reads optional device tree properties to apply
+ * platform-specific constraints.
+ *
+ * "limit-hs-gear": Specifies the max HS gear.
+ * "limit-gear-rate": Specifies the max High-Speed rate.
+ */
+void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params)
+{
+ struct device_node *np = hba->dev->of_node;
+ u32 hs_gear;
+ const char *hs_rate;
+
+ if (!of_property_read_u32(np, "limit-hs-gear", &hs_gear)) {
+ host_params->hs_tx_gear = hs_gear;
+ host_params->hs_rx_gear = hs_gear;
+ }
+
+ if (!of_property_read_string(np, "limit-gear-rate", &hs_rate)) {
+ if (!strcmp(hs_rate, "rate-a"))
+ host_params->hs_rate = PA_HS_MODE_A;
+ else if (!strcmp(hs_rate, "rate-b"))
+ host_params->hs_rate = PA_HS_MODE_B;
+ else
+ dev_warn(hba->dev, "Invalid rate: %s\n", hs_rate);
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_parse_gear_limits);
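For illustration, a hypothetical devicetree fragment using these optional properties (node name and values are examples only, not taken from any binding in this series):

	/*
	 *	&ufshc {
	 *		limit-hs-gear = <4>;
	 *		limit-gear-rate = "rate-a";
	 *	};
	 *
	 * With this fragment, hs_tx_gear and hs_rx_gear are capped at gear 4
	 * and hs_rate is set to PA_HS_MODE_A by ufshcd_parse_gear_limits().
	 */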
+
void ufshcd_init_host_params(struct ufs_host_params *host_params)
{
*host_params = (struct ufs_host_params){
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index 3017f8e8f93c..0a18a8aed94d 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -29,6 +29,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
void ufshcd_init_host_params(struct ufs_host_params *host_params);
+void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_remove(struct platform_device *pdev);