Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c       |   6
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c     | 330
-rw-r--r--  drivers/ufs/core/ufshcd.c        | 292
-rw-r--r--  drivers/ufs/host/ufs-exynos.c    |   4
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c  | 330
-rw-r--r--  drivers/ufs/host/ufs-mediatek.h  |  32
-rw-r--r--  drivers/ufs/host/ufs-qcom.c      | 497
-rw-r--r--  drivers/ufs/host/ufs-qcom.h      |  20
-rw-r--r--  drivers/ufs/host/ufshcd-pci.c    |  33
9 files changed, 1263 insertions(+), 281 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index f1294c29f484..1e50675772fe 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -674,7 +674,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) int tag = scsi_cmd_to_rq(cmd)->tag; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; - unsigned long flags; int err; /* Skip task abort in case previous aborts failed and report failure */ @@ -713,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) return FAILED; } - spin_lock_irqsave(&hwq->cq_lock, flags); - if (ufshcd_cmd_inflight(lrbp->cmd)) - ufshcd_release_scsi_cmd(hba, lrbp); - spin_unlock_irqrestore(&hwq->cq_lock, flags); - return SUCCESS; } diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 634cf163f4cb..4bd7d491e3c5 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -5,6 +5,7 @@ #include <linux/string.h> #include <linux/bitfield.h> #include <linux/unaligned.h> +#include <linux/string_choices.h> #include <ufs/ufs.h> #include <ufs/unipro.h> @@ -57,6 +58,53 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear) } } +static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint) +{ + switch (hint) { + case WB_RESIZE_HINT_KEEP: + return "keep"; + case WB_RESIZE_HINT_DECREASE: + return "decrease"; + case WB_RESIZE_HINT_INCREASE: + return "increase"; + default: + return "unknown"; + } +} + +static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status) +{ + switch (status) { + case WB_RESIZE_STATUS_IDLE: + return "idle"; + case WB_RESIZE_STATUS_IN_PROGRESS: + return "in_progress"; + case WB_RESIZE_STATUS_COMPLETE_SUCCESS: + return "complete_success"; + case WB_RESIZE_STATUS_GENERAL_FAILURE: + return "general_failure"; + default: + return "unknown"; + } +} + +static const char * const ufs_hid_states[] = { + [HID_IDLE] = "idle", + [ANALYSIS_IN_PROGRESS] = "analysis_in_progress", + [DEFRAG_REQUIRED] = "defrag_required", + [DEFRAG_IN_PROGRESS] = "defrag_in_progress", + [DEFRAG_COMPLETED] = "defrag_completed", + [DEFRAG_NOT_REQUIRED] = "defrag_not_required", +}; + +static const char *ufs_hid_state_to_string(enum ufs_hid_state state) +{ + if (state < NUM_UFS_HID_STATES) + return ufs_hid_states[state]; + + return "unknown"; +} + static const char *ufshcd_uic_link_state_to_string( enum uic_link_state state) { @@ -411,6 +459,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev, return count; } +static const char * const wb_resize_en_mode[] = { + [WB_RESIZE_EN_IDLE] = "idle", + [WB_RESIZE_EN_DECREASE] = "decrease", + [WB_RESIZE_EN_INCREASE] = "increase", +}; + +static ssize_t wb_resize_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + ssize_t res; + + if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled + || !hba->dev_info.b_presrv_uspc_en + || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE)) + return -EOPNOTSUPP; + + mode = sysfs_match_string(wb_resize_en_mode, buf); + if (mode < 0) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + res = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + res = ufshcd_wb_set_resize_en(hba, mode); + ufshcd_rpm_put_sync(hba); + +out: + up(&hba->host_sem); + return res < 0 ? 
res : count; +} + /** * pm_qos_enable_show - sysfs handler to show pm qos enable value * @dev: device associated with the UFS controller @@ -526,6 +612,7 @@ static DEVICE_ATTR_RW(auto_hibern8); static DEVICE_ATTR_RW(wb_on); static DEVICE_ATTR_RW(enable_wb_buf_flush); static DEVICE_ATTR_RW(wb_flush_threshold); +static DEVICE_ATTR_WO(wb_resize_enable); static DEVICE_ATTR_RW(rtc_update_ms); static DEVICE_ATTR_RW(pm_qos_enable); static DEVICE_ATTR_RO(critical_health); @@ -543,6 +630,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_wb_on.attr, &dev_attr_enable_wb_buf_flush.attr, &dev_attr_wb_flush_threshold.attr, + &dev_attr_wb_resize_enable.attr, &dev_attr_rtc_update_ms.attr, &dev_attr_pm_qos_enable.attr, &dev_attr_critical_health.attr, @@ -1429,7 +1517,7 @@ static ssize_t _name##_show(struct device *dev, \ ret = -EINVAL; \ goto out; \ } \ - ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \ + ret = sysfs_emit(buf, "%s\n", str_true_false(flag)); \ out: \ up(&hba->host_sem); \ return ret; \ @@ -1549,6 +1637,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE; } +static int wb_read_resize_attrs(struct ufs_hba *hba, + enum attr_idn idn, u32 *attr_val) +{ + u8 index = 0; + int ret; + + if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled + || !hba->dev_info.b_presrv_uspc_en + || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE)) + return -EOPNOTSUPP; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + index = ufshcd_wb_get_query_index(hba); + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + idn, index, 0, attr_val); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return ret; +} + +static ssize_t wb_resize_hint_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + u32 value; + + ret = wb_read_resize_attrs(hba, + QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value)); +} + +static DEVICE_ATTR_RO(wb_resize_hint); + +static ssize_t wb_resize_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + u32 value; + + ret = wb_read_resize_attrs(hba, + QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value)); +} + +static DEVICE_ATTR_RO(wb_resize_status); + #define UFS_ATTRIBUTE(_name, _uname) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -1622,6 +1771,8 @@ static struct attribute *ufs_sysfs_attributes[] = { &dev_attr_wb_avail_buf.attr, &dev_attr_wb_life_time_est.attr, &dev_attr_wb_cur_buf.attr, + &dev_attr_wb_resize_hint.attr, + &dev_attr_wb_resize_status.attr, NULL, }; @@ -1630,6 +1781,178 @@ static const struct attribute_group ufs_sysfs_attributes_group = { .attrs = ufs_sysfs_attributes, }; +static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u32 *attr_val) +{ + int ret; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return ret; +} + +static ssize_t analysis_trigger_store(struct 
device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + int ret; + + if (sysfs_streq(buf, "enable")) + mode = HID_ANALYSIS_ENABLE; + else if (sysfs_streq(buf, "disable")) + mode = HID_ANALYSIS_AND_DEFRAG_DISABLE; + else + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_WO(analysis_trigger); + +static ssize_t defrag_trigger_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + int ret; + + if (sysfs_streq(buf, "enable")) + mode = HID_ANALYSIS_AND_DEFRAG_ENABLE; + else if (sysfs_streq(buf, "disable")) + mode = HID_ANALYSIS_AND_DEFRAG_DISABLE; + else + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_WO(defrag_trigger); + +static ssize_t fragmented_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static DEVICE_ATTR_RO(fragmented_size); + +static ssize_t defrag_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_SIZE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static ssize_t defrag_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + if (kstrtou32(buf, 0, &value)) + return -EINVAL; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_HID_SIZE, &value); + + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR_RW(defrag_size); + +static ssize_t progress_ratio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%u\n", value); +} + +static DEVICE_ATTR_RO(progress_ratio); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_HID_STATE, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value)); +} + +static DEVICE_ATTR_RO(state); + +static struct attribute *ufs_sysfs_hid[] = { + &dev_attr_analysis_trigger.attr, + &dev_attr_defrag_trigger.attr, + &dev_attr_fragmented_size.attr, + &dev_attr_defrag_size.attr, + &dev_attr_progress_ratio.attr, + &dev_attr_state.attr, + NULL, +}; + +static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct ufs_hba *hba = dev_get_drvdata(dev); + + return hba->dev_info.hid_sup ? 
attr->mode : 0; +} + +static const struct attribute_group ufs_sysfs_hid_group = { + .name = "hid", + .attrs = ufs_sysfs_hid, + .is_visible = ufs_sysfs_hid_is_visible, +}; + static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_default_group, &ufs_sysfs_capabilities_group, @@ -1644,6 +1967,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = { &ufs_sysfs_string_descriptors_group, &ufs_sysfs_flags_group, &ufs_sysfs_attributes_group, + &ufs_sysfs_hid_group, NULL, }; @@ -1675,7 +1999,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); -UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -1692,7 +2016,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_logical_block_count.attr, &dev_attr_erase_block_size.attr, &dev_attr_provisioning_type.attr, - &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_physical_memory_resource_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, &dev_attr_wb_buf_alloc_units.attr, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 7735421e3991..96ad57c3144b 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -53,7 +53,7 @@ /* UIC command timeout, unit: ms */ enum { UIC_CMD_TIMEOUT_DEFAULT = 500, - UIC_CMD_TIMEOUT_MAX = 2000, + UIC_CMD_TIMEOUT_MAX = 5000, }; /* NOP OUT retries waiting for NOP IN response */ #define NOP_OUT_RETRIES 10 @@ -63,7 +63,11 @@ enum { /* Query request retries */ #define QUERY_REQ_RETRIES 3 /* Query request timeout */ -#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ +enum { + QUERY_REQ_TIMEOUT_MIN = 1, + QUERY_REQ_TIMEOUT_DEFAULT = 1500, + QUERY_REQ_TIMEOUT_MAX = 30000 +}; /* Advanced RPMB request timeout */ #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */ @@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = { module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644); MODULE_PARM_DESC(uic_cmd_timeout, - "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively"); + "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively"); + +static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT; + +static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN, + QUERY_REQ_TIMEOUT_MAX); +} + +static const struct kernel_param_ops dev_cmd_timeout_ops = { + .set = dev_cmd_timeout_set, + .get = param_get_uint, +}; + +module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644); +MODULE_PARM_DESC(dev_cmd_timeout, + "UFS Device command timeout in milliseconds. Defaults to 1.5s. 
Supported values range from 1ms to 30 seconds inclusively"); #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ @@ -343,6 +364,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_disable_irq); +/** + * ufshcd_enable_intr - enable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + u32 new_val = old_val | intrs; + + if (new_val != old_val) + ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE); +} + +/** + * ufshcd_disable_intr - disable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + u32 new_val = old_val & ~intrs; + + if (new_val != old_val) + ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE); +} + static void ufshcd_configure_wb(struct ufs_hba *hba) { if (!ufshcd_is_wb_allowed(hba)) @@ -432,7 +481,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, u8 opcode = 0, group_id = 0; u32 doorbell = 0; u32 intr; - int hwq_id = -1; + u32 hwq_id = 0; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct scsi_cmnd *cmd = lrbp->cmd; struct request *rq = scsi_cmd_to_rq(cmd); @@ -644,9 +693,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), hba->ufs_stats.hibern8_exit_cnt); - dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", - div_u64(hba->ufs_stats.last_intr_ts, 1000), - hba->ufs_stats.last_intr_status); dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", hba->eh_flags, hba->req_abort_count); dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", @@ -1379,6 +1425,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ + mutex_lock(&hba->host->scan_mutex); blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1389,6 +1436,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); goto out; } @@ -1410,6 +1458,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err) mutex_unlock(&hba->wb_mutex); blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); ufshcd_release(hba); } @@ -2545,7 +2594,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * @hba: per adapter instance * @uic_cmd: UIC command * - * Return: 0 only if success. + * Return: 0 if successful; < 0 upon failure. 
*/ static int __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) @@ -2575,6 +2624,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) */ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { + unsigned long flags; int ret; if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) @@ -2584,6 +2634,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + spin_unlock_irqrestore(hba->host->host_lock, flags); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2661,32 +2715,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) } /** - * ufshcd_enable_intr - enable interrupts - * @hba: per adapter instance - * @intrs: interrupt bits - */ -static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) -{ - u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - - set |= intrs; - ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); -} - -/** - * ufshcd_disable_intr - disable interrupts - * @hba: per adapter instance - * @intrs: interrupt bits - */ -static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) -{ - u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - - set &= ~intrs; - ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); -} - -/** * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request * descriptor according to request * @hba: per adapter instance @@ -2805,8 +2833,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, /* Copy the Descriptor */ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) memcpy(ucd_req_ptr + 1, query->descriptor, len); - - memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) @@ -2819,8 +2845,6 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) .transaction_code = UPIU_TRANSACTION_NOP_OUT, .task_tag = lrbp->task_tag, }; - - memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -2846,6 +2870,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, else ret = -EINVAL; + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + return ret; } @@ -3053,6 +3079,9 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, hba->dev_cmd.type = cmd_type; } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag) { @@ -3165,9 +3194,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } +/* + * Return: 0 upon success; < 0 upon failure. + */ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int max_timeout) { @@ -3242,6 +3275,7 @@ retry: } } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3259,6 +3293,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba) ufshcd_release(hba); } +/* + * Return: 0 upon success; < 0 upon failure. 
+ */ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, const u32 tag, int timeout) { @@ -3346,6 +3383,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", __func__, opcode, idn, ret, retries); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3357,7 +3395,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, * @index: flag index to access * @flag_res: the flag value after the query request completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res) @@ -3365,7 +3403,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, struct ufs_query_req *request = NULL; struct ufs_query_res *response = NULL; int err, selector = 0; - int timeout = QUERY_REQ_TIMEOUT; + int timeout = dev_cmd_timeout; BUG_ON(!hba); @@ -3413,6 +3451,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3425,7 +3464,7 @@ out_unlock: * @selector: selector field * @attr_val: the attribute value after the query request completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 upon success; < 0 upon failure. */ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) @@ -3462,7 +3501,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, goto out_unlock; } - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) { dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", @@ -3474,6 +3513,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, out_unlock: ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3488,7 +3528,7 @@ out_unlock: * @attr_val: the attribute value after the query request * completes * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, @@ -3511,9 +3551,13 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba, dev_err(hba->dev, "%s: query attribute, idn %d, failed with error %d after %d retries\n", __func__, idn, ret, QUERY_REQ_RETRIES); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } +/* + * Return: 0 if successful; < 0 upon failure. 
+ */ static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len) @@ -3558,7 +3602,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out_unlock; } - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) { dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", @@ -3571,6 +3615,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, out_unlock: hba->dev_cmd.query.descriptor = NULL; ufshcd_dev_man_unlock(hba); + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3587,7 +3632,7 @@ out_unlock: * The buf_len parameter will contain, on return, the length parameter * received on the response. * - * Return: 0 for success, non-zero in case of failure. + * Return: 0 for success; < 0 upon failure. */ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, @@ -3605,6 +3650,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, break; } + WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err); return err; } @@ -3617,7 +3663,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success, non-zero otherwise. + * Return: 0 in case of success; < 0 upon failure. */ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, @@ -3684,6 +3730,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, out: if (is_kmalloc) kfree(desc_buf); + WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret); return ret; } @@ -3797,7 +3844,7 @@ out: * @param_read_buf: pointer to buffer where parameter would be read * @param_size: sizeof(param_read_buf) * - * Return: 0 in case of success, non-zero otherwise. + * Return: 0 in case of success; < 0 upon failure. */ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, int lun, @@ -4233,6 +4280,30 @@ out: EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); /** + * ufshcd_dme_rmw - get modify set a DME attribute + * @hba: per adapter instance + * @mask: indicates which bits to clear from the value that has been read + * @val: actual value to write + * @attr: dme attribute + */ +int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, + u32 val, u32 attr) +{ + u32 cfg = 0; + int err; + + err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg); + if (err) + return err; + + cfg &= ~mask; + cfg |= (val & mask); + + return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg); +} +EXPORT_SYMBOL_GPL(ufshcd_dme_rmw); + +/** * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power * state) and waits for it to take effect. * @@ -4254,7 +4325,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) unsigned long flags; u8 status; int ret; - bool reenable_intr = false; mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); @@ -4265,15 +4335,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) goto out_unlock; } hba->uic_async_done = &uic_async_done; - if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { - ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); - /* - * Make sure UIC command completion interrupt is disabled before - * issuing UIC command. 
- */ - ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - reenable_intr = true; - } + ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); spin_unlock_irqrestore(hba->host->host_lock, flags); ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { @@ -4317,9 +4379,7 @@ out: spin_lock_irqsave(hba->host->host_lock, flags); hba->active_uic_cmd = NULL; hba->uic_async_done = NULL; - if (reenable_intr) - ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); - if (ret) { + if (ret && !hba->pm_op_in_progress) { ufshcd_set_link_broken(hba); ufshcd_schedule_eh_work(hba); } @@ -4327,6 +4387,14 @@ out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); mutex_unlock(&hba->uic_cmd_mutex); + /* + * If the h8 exit fails during the runtime resume process, it becomes + * stuck and cannot be recovered through the error handler. To fix + * this, use link recovery instead of the error handler. + */ + if (ret && hba->pm_op_in_progress) + ret = ufshcd_link_recovery(hba); + return ret; } @@ -4341,28 +4409,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { int ret; + if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) || + uic_cmd->command != UIC_CMD_DME_SET) + return ufshcd_send_uic_cmd(hba, uic_cmd); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) return 0; ufshcd_hold(hba); - - if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) && - uic_cmd->command == UIC_CMD_DME_SET) { - ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd); - goto out; - } - - mutex_lock(&hba->uic_cmd_mutex); - ufshcd_add_delay_before_dme_cmd(hba); - - ret = __ufshcd_send_uic_cmd(hba, uic_cmd); - if (!ret) - ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); - - mutex_unlock(&hba->uic_cmd_mutex); - -out: + ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd); ufshcd_release(hba); + return ret; } @@ -4775,7 +4832,7 @@ out: * 3. Program UTRL and UTMRL base address * 4. Configure run-stop-registers * - * Return: 0 on success, non-zero value on failure. + * Return: 0 if successful; < 0 upon failure. 
*/ int ufshcd_make_hba_operational(struct ufs_hba *hba) { @@ -6020,7 +6077,7 @@ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id) request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) { dev_err(hba->dev, "%s: failed to read device level exception %d\n", @@ -6107,6 +6164,21 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) return ret; } +int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode) +{ + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode); + if (ret) + dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n", + __func__, ret); + + return ret; +} + static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba, u32 avail_buf) { @@ -6572,7 +6644,7 @@ static void ufshcd_err_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eh_work); dev_info(hba->dev, - "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", + "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n", __func__, ufshcd_state_name[hba->ufshcd_state], hba->is_powered, hba->shutting_down, hba->saved_err, hba->saved_uic_err, hba->force_reset, @@ -6587,9 +6659,14 @@ static void ufshcd_err_handler(struct work_struct *work) up(&hba->host_sem); return; } - ufshcd_set_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_err_handling_prepare(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Complete requests that have door-bell cleared by h/w */ ufshcd_complete_requests(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); @@ -7001,7 +7078,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) } /** - * ufshcd_intr - Main interrupt service routine + * ufshcd_threaded_intr - Threaded interrupt service routine * @irq: irq number * @__hba: pointer to adapter instance * @@ -7009,16 +7086,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_intr(int irq, void *__hba) +static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status = 0; + u32 last_intr_status, intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; - intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - hba->ufs_stats.last_intr_status = intr_status; - hba->ufs_stats.last_intr_ts = local_clock(); + last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); /* * There could be max of hba->nutrs reqs in flight and in worst case @@ -7042,7 +7117,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", __func__, intr_status, - hba->ufs_stats.last_intr_status, + last_intr_status, enabled_intr_status); ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); } @@ -7050,6 +7125,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) return retval; } +/** + * ufshcd_intr - 
Main interrupt service routine + * @irq: irq number + * @__hba: pointer to adapter instance + * + * Return: + * IRQ_HANDLED - If interrupt is valid + * IRQ_WAKE_THREAD - If handling is moved to threaded handled + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_intr(int irq, void *__hba) +{ + struct ufs_hba *hba = __hba; + + /* Move interrupt handling to thread when MCQ & ESI are not enabled */ + if (!hba->mcq_enabled || !hba->mcq_esi_enabled) + return IRQ_WAKE_THREAD; + + /* Directly handle interrupts since MCQ ESI handlers does the hard job */ + return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) & + ufshcd_readl(hba, REG_INTERRUPT_ENABLE)); +} + static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; @@ -7245,7 +7343,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, * bound to fail since dev_cmd.query and dev_cmd.type were left empty. * read the response directly ignoring all errors. */ - ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT); + ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout); /* just copy the upiu response as it is */ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); @@ -7745,7 +7843,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ - ufshcd_scale_clks(hba, ULONG_MAX, true); + if (ufshcd_is_clkscaling_supported(hba)) + ufshcd_scale_clks(hba, ULONG_MAX, true); err = ufshcd_hba_enable(hba); @@ -8107,6 +8206,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) */ dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; + dev_info->ext_wb_sup = get_unaligned_be16(desc_buf + + DEVICE_DESC_PARAM_EXT_WB_SUP); + dev_info->b_presrv_uspc_en = desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; @@ -8354,6 +8456,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba) dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP]; + dev_info->hid_sup = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) & + UFS_DEV_HID_SUPPORT; + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; err = ufshcd_read_string_desc(hba, model_index, @@ -8678,7 +8784,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3); - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) dev_err(hba->dev, "%s: failed to set timestamp %d\n", @@ -8793,6 +8899,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) u32 intrs; ret = ufshcd_mcq_vops_config_esi(hba); + hba->mcq_esi_enabled = !ret; dev_info(hba->dev, "ESI %sconfigured\n", ret ? 
"is not " : ""); intrs = UFSHCD_ENABLE_MCQ_INTRS; @@ -10654,7 +10761,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_readl(hba, REG_INTERRUPT_ENABLE); /* IRQ registration */ - err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); + err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr, + IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba); if (err) { dev_err(hba->dev, "request irq failed\n"); goto out_disable; diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 3e545af536e5..f0adcd9dd553 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -1110,8 +1110,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba) hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE); hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); - hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); - hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 182f58d0c9db..86ae73b89d4d 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = { static const struct of_device_id ufs_mtk_of_match[] = { { .compatible = "mediatek,mt8183-ufshci" }, + { .compatible = "mediatek,mt8195-ufshci" }, {}, }; MODULE_DEVICE_TABLE(of, ufs_mtk_of_match); @@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE); + return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE; } static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); + return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL; } static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); + return host->caps & UFS_MTK_CAP_BROKEN_VCC; } static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO); + return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO; } static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX); + return host->caps & UFS_MTK_CAP_TX_SKEW_FIX; } static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS); + return host->caps & UFS_MTK_CAP_RTFF_MTCMOS; } static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); - return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM); + return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM; +} + +static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_clk *mclk = &host->mclk; + + return mclk->ufs_sel_clki && + mclk->ufs_sel_max_clki && + mclk->ufs_sel_min_clki; } static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) @@ 
-267,6 +278,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, REG_UFS_XOUFS_CTRL); + + /* DDR_EN setting */ + if (host->ip_ver >= IP_VER_MT6989) { + ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8), + 0x453000, REG_UFS_MMIO_OPT_CTRL_0); + } + } return 0; @@ -344,7 +362,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); - ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res); + /* + * If clock on timeout, assume clock is off, notify tfa do clock + * off setting.(keep DIFN disable, release resource) + * If clock off timeout, assume clock will off finally, + * set ref_clk_enabled directly.(keep DIFN disable, keep resource) + */ + if (on) + ufs_mtk_ref_clk_notify(false, POST_CHANGE, res); + else + host->ref_clk_enabled = false; return -ETIMEDOUT; @@ -663,6 +690,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba) if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos")) host->caps |= UFS_MTK_CAP_RTFF_MTCMOS; + if (of_property_read_bool(np, "mediatek,ufs-broken-rtc")) + host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC; + dev_info(hba->dev, "caps: 0x%x", host->caps); } @@ -779,6 +809,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, return ret; } +static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct blk_mq_tag_set *tag_set = &hba->host->tag_set; + struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT]; + unsigned int nr = map->nr_queues; + unsigned int q_index; + + q_index = map->mq_map[cpu]; + if (q_index > nr) { + dev_err(hba->dev, "hwq index %d exceed %d\n", + q_index, nr); + return MTK_MCQ_INVALID_IRQ; + } + + return host->mcq_intr_info[q_index].irq; +} + +static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu) +{ + unsigned int irq, _cpu; + int ret; + + irq = ufs_mtk_mcq_get_irq(hba, cpu); + if (irq == MTK_MCQ_INVALID_IRQ) { + dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu); + return; + } + + /* force migrate irq of cpu0 to cpu3 */ + _cpu = (cpu == 0) ? 3 : cpu; + ret = irq_set_affinity(irq, cpumask_of(_cpu)); + if (ret) { + dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n", + irq, _cpu); + return; + } + dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu); +} + +static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver) +{ + bool is_legacy = false; + + switch (hw_ip_ver) { + case IP_LEGACY_VER_MT6893: + case IP_LEGACY_VER_MT6781: + /* can add other legacy chipset ID here accordingly */ + is_legacy = true; + break; + default: + break; + } + dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy); + + return is_legacy; +} + +/* + * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since + * project MT6878. In order to perform correct version comparison, + * version number is changed by SW for the following projects. 
+ * IP_VER_MT6983 0x00360000 to 0x10360000 + * IP_VER_MT6897 0x01440000 to 0x10440000 + * IP_VER_MT6989 0x01450000 to 0x10450000 + * IP_VER_MT6991 0x01460000 to 0x10460000 + */ +static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + u32 hw_ip_ver; + + hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + + if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) || + ((hw_ip_ver & (0xFF << 24)) == 0)) { + hw_ip_ver &= ~(0xFF << 24); + hw_ip_ver |= (0x1 << 28); + } + + host->ip_ver = hw_ip_ver; + + host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver); +} + static void ufs_mtk_get_controller_version(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); @@ -818,8 +933,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct list_head *head = &hba->clk_list_head; - struct ufs_mtk_clk *mclk = &host->mclk; struct ufs_clk_info *clki, *clki_tmp; + struct device *dev = hba->dev; + struct regulator *reg; + u32 volt; /* * Find private clocks and store them in struct ufs_mtk_clk. @@ -837,15 +954,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba) host->mclk.ufs_sel_min_clki = clki; clk_disable_unprepare(clki->clk); list_del(&clki->list); + } else if (!strcmp(clki->name, "ufs_fde")) { + host->mclk.ufs_fde_clki = clki; + } else if (!strcmp(clki->name, "ufs_fde_max_src")) { + host->mclk.ufs_fde_max_clki = clki; + clk_disable_unprepare(clki->clk); + list_del(&clki->list); + } else if (!strcmp(clki->name, "ufs_fde_min_src")) { + host->mclk.ufs_fde_min_clki = clki; + clk_disable_unprepare(clki->clk); + list_del(&clki->list); } } - if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki || - !mclk->ufs_sel_min_clki) { + list_for_each_entry(clki, head, list) { + dev_info(hba->dev, "clk \"%s\" present", clki->name); + } + + if (!ufs_mtk_is_clk_scale_ready(hba)) { hba->caps &= ~UFSHCD_CAP_CLK_SCALING; dev_info(hba->dev, "%s: Clk-scaling not ready. Feature disabled.", __func__); + return; + } + + /* + * Default get vcore if dts have these settings. + * No matter clock scaling support or not. 
(may disable by customer) + */ + reg = devm_regulator_get_optional(dev, "dvfsrc-vcore"); + if (IS_ERR(reg)) { + dev_info(dev, "failed to get dvfsrc-vcore: %ld", + PTR_ERR(reg)); + return; + } + + if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min", + &volt)) { + dev_info(dev, "failed to get clk-scale-up-vcore-min"); + return; + } + + host->mclk.reg_vcore = reg; + host->mclk.vcore_volt = volt; + + /* If default boot is max gear, request vcore */ + if (reg && volt && host->clk_scale_up) { + if (regulator_set_voltage(reg, volt, INT_MAX)) { + dev_info(hba->dev, + "Failed to set vcore to %d\n", volt); + } } } @@ -1014,13 +1173,17 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Enable clk scaling*/ hba->caps |= UFSHCD_CAP_CLK_SCALING; + host->clk_scale_up = true; /* default is max freq */ /* Set runtime pm delay to replace default */ shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS; hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR; - hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; + if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC) + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; + hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); if (host->caps & UFS_MTK_CAP_DISABLE_AH8) @@ -1050,7 +1213,7 @@ static int ufs_mtk_init(struct ufs_hba *hba) ufs_mtk_setup_clocks(hba, true, POST_CHANGE); - host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); + ufs_mtk_get_hw_ip_version(hba); goto out; @@ -1505,6 +1668,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) { struct ufs_dev_info *dev_info = &hba->dev_info; u16 mid = dev_info->wmanufacturerid; + unsigned int cpu; + + if (hba->mcq_enabled) { + /* Iterate all cpus to set affinity for mcq irqs */ + for (cpu = 0; cpu < nr_cpu_ids; cpu++) + ufs_mtk_mcq_set_irq_affinity(hba, cpu); + } if (mid == UFS_VENDOR_SAMSUNG) { ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); @@ -1598,24 +1768,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba, hba->vps->ondemand_data.downdifferential = 20; } -/** - * ufs_mtk_clk_scale - Internal clk scaling operation - * - * MTK platform supports clk scaling by switching parent of ufs_sel(mux). - * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware. - * Max and min clocks rate of ufs_sel defined in dts should match rate of - * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. - * This prevent changing rate of pll clock that is shared between modules. 
- * - * @hba: per adapter instance - * @scale_up: True for scaling up and false for scaling down - */ -static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) +static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct ufs_mtk_clk *mclk = &host->mclk; struct ufs_clk_info *clki = mclk->ufs_sel_clki; - int ret = 0; + struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki; + struct regulator *reg; + int volt, ret = 0; + bool clk_bind_vcore = false; + bool clk_fde_scale = false; + + if (!hba->clk_scaling.is_initialized) + return; + + if (!clki || !fde_clki) + return; + + reg = host->mclk.reg_vcore; + volt = host->mclk.vcore_volt; + if (reg && volt != 0) + clk_bind_vcore = true; + + if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki) + clk_fde_scale = true; ret = clk_prepare_enable(clki->clk); if (ret) { @@ -1624,21 +1800,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) return; } + if (clk_fde_scale) { + ret = clk_prepare_enable(fde_clki->clk); + if (ret) { + dev_info(hba->dev, + "fde clk_prepare_enable() fail, ret: %d\n", ret); + return; + } + } + if (scale_up) { + if (clk_bind_vcore) { + ret = regulator_set_voltage(reg, volt, INT_MAX); + if (ret) { + dev_info(hba->dev, + "Failed to set vcore to %d\n", volt); + goto out; + } + } + ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk); - clki->curr_freq = clki->max_freq; + if (ret) { + dev_info(hba->dev, "Failed to set clk mux, ret = %d\n", + ret); + } + + if (clk_fde_scale) { + ret = clk_set_parent(fde_clki->clk, + mclk->ufs_fde_max_clki->clk); + if (ret) { + dev_info(hba->dev, + "Failed to set fde clk mux, ret = %d\n", + ret); + } + } } else { + if (clk_fde_scale) { + ret = clk_set_parent(fde_clki->clk, + mclk->ufs_fde_min_clki->clk); + if (ret) { + dev_info(hba->dev, + "Failed to set fde clk mux, ret = %d\n", + ret); + goto out; + } + } + ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk); - clki->curr_freq = clki->min_freq; - } + if (ret) { + dev_info(hba->dev, "Failed to set clk mux, ret = %d\n", + ret); + goto out; + } - if (ret) { - dev_info(hba->dev, - "Failed to set ufs_sel_clki, ret: %d\n", ret); + if (clk_bind_vcore) { + ret = regulator_set_voltage(reg, 0, INT_MAX); + if (ret) { + dev_info(hba->dev, + "failed to set vcore to MIN\n"); + } + } } +out: clk_disable_unprepare(clki->clk); + if (clk_fde_scale) + clk_disable_unprepare(fde_clki->clk); +} + +/** + * ufs_mtk_clk_scale - Internal clk scaling operation + * + * MTK platform supports clk scaling by switching parent of ufs_sel(mux). + * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware. + * Max and min clocks rate of ufs_sel defined in dts should match rate of + * "ufs_sel_max_src" and "ufs_sel_min_src" respectively. + * This prevent changing rate of pll clock that is shared between modules. 
+ * + * @hba: per adapter instance + * @scale_up: True for scaling up and false for scaling down + */ +static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct ufs_mtk_clk *mclk = &host->mclk; + struct ufs_clk_info *clki = mclk->ufs_sel_clki; + + if (host->clk_scale_up == scale_up) + goto out; + + if (scale_up) + _ufs_mtk_clk_scale(hba, true); + else + _ufs_mtk_clk_scale(hba, false); + + host->clk_scale_up = scale_up; + + /* Must always set before clk_set_rate() */ + if (scale_up) + clki->curr_freq = clki->max_freq; + else + clki->curr_freq = clki->min_freq; +out: trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk)); } diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h index 05d76a6bd772..e46dc5fa209d 100644 --- a/drivers/ufs/host/ufs-mediatek.h +++ b/drivers/ufs/host/ufs-mediatek.h @@ -133,6 +133,8 @@ enum ufs_mtk_host_caps { UFS_MTK_CAP_DISABLE_MCQ = 1 << 8, /* Control MTCMOS with RTFF */ UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9, + + UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10, }; struct ufs_mtk_crypt_cfg { @@ -147,6 +149,11 @@ struct ufs_mtk_clk { struct ufs_clk_info *ufs_sel_clki; /* Mux */ struct ufs_clk_info *ufs_sel_max_clki; /* Max src */ struct ufs_clk_info *ufs_sel_min_clki; /* Min src */ + struct ufs_clk_info *ufs_fde_clki; /* Mux */ + struct ufs_clk_info *ufs_fde_max_clki; /* Max src */ + struct ufs_clk_info *ufs_fde_min_clki; /* Min src */ + struct regulator *reg_vcore; + int vcore_volt; }; struct ufs_mtk_hw_ver { @@ -176,9 +183,11 @@ struct ufs_mtk_host { bool mphy_powered_on; bool unipro_lpm; bool ref_clk_enabled; + bool clk_scale_up; u16 ref_clk_ungating_wait_us; u16 ref_clk_gating_wait_us; u32 ip_ver; + bool legacy_ip_ver; bool mcq_set_intr; bool is_mcq_intr_enabled; @@ -192,4 +201,27 @@ struct ufs_mtk_host { /* MTK RTT support number */ #define MTK_MAX_NUM_RTT 2 +/* UFSHCI MTK ip version value */ +enum { + /* UFSHCI 3.1 */ + IP_VER_MT6983 = 0x10360000, + IP_VER_MT6878 = 0x10420200, + + /* UFSHCI 4.0 */ + IP_VER_MT6897 = 0x10440000, + IP_VER_MT6989 = 0x10450000, + IP_VER_MT6899 = 0x10450100, + IP_VER_MT6991_A0 = 0x10460000, + IP_VER_MT6991_B0 = 0x10470000, + IP_VER_MT6993 = 0x10480000, + + IP_VER_NONE = 0xFFFFFFFF +}; + +enum ip_ver_legacy { + IP_LEGACY_VER_MT6781 = 0x10380000, + IP_LEGACY_VER_MT6879 = 0x10360000, + IP_LEGACY_VER_MT6893 = 0x20160706 +}; + #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index c0761ccc1381..76fc70503a62 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -5,6 +5,7 @@ #include <linux/acpi.h> #include <linux/clk.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/devfreq.h> #include <linux/gpio/consumer.h> @@ -102,8 +103,28 @@ static const struct __ufs_qcom_bw_table { [MODE_MAX][0][0] = { 7643136, 819200 }, }; +static const struct { + int nminor; + char *prefix; +} testbus_info[TSTBUS_MAX] = { + [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"}, + [TSTBUS_UARM] = {32, "TSTBUS_UARM"}, + [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"}, + [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"}, + [TSTBUS_DFC] = {32, "TSTBUS_DFC"}, + [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"}, + [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"}, + [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"}, + [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"}, + [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"}, + [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"}, + [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"}, +}; + static void ufs_qcom_get_default_testbus_cfg(struct 
ufs_qcom_host *host); -static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq); +static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba, + unsigned long freq, char *name); +static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq); static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) { @@ -173,7 +194,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host) profile->ll_ops = ufs_qcom_crypto_ops; profile->max_dun_bytes_supported = 8; - profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW; + profile->key_types_supported = qcom_ice_get_supported_key_type(ice); profile->dev = dev; /* @@ -221,17 +242,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile, struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; - /* Only AES-256-XTS has been tested so far. */ - if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS) - return -EOPNOTSUPP; - ufshcd_hold(hba); - err = qcom_ice_program_key(host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - key->bytes, - key->crypto_cfg.data_unit_size / 512, - slot); + err = qcom_ice_program_key(host->ice, slot, key); ufshcd_release(hba); return err; } @@ -250,9 +262,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile, return err; } +static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size, + sw_secret); +} + +static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key); +} + +static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_generate_key(host->ice, lt_key); +} + +static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key); +} + static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = { .keyslot_program = ufs_qcom_ice_keyslot_program, .keyslot_evict = ufs_qcom_ice_keyslot_evict, + .derive_sw_secret = ufs_qcom_ice_derive_sw_secret, + .import_key = ufs_qcom_ice_import_key, + .generate_key = ufs_qcom_ice_generate_key, + .prepare_key = ufs_qcom_ice_prepare_key, }; #else @@ -452,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) if (ret) return ret; - if (phy->power_count) { + if (phy->power_count) phy_power_off(phy); - phy_exit(phy); - } + /* phy initialization - calibrate the phy */ ret = phy_init(phy); @@ -477,6 +532,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) goto out_disable_phy; } + ret = phy_calibrate(phy); + if (ret) { + 
dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret); + goto out_disable_phy; + } + ufs_qcom_select_unipro_mode(host); return 0; @@ -497,11 +558,32 @@ out_disable_phy: */ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) { + int err; + + /* Enable UTP internal clock gating */ ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ ufshcd_readl(hba, REG_UFS_CFG2); + + /* Enable Unipro internal clock gating */ + err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK, + DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK, + PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL); +out: + if (err) + dev_err(hba->dev, "hw clk gating enabled failed\n"); } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, @@ -543,13 +625,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, * * @hba: host controller instance * @is_pre_scale_up: flag to check if pre scale up condition. + * @freq: target opp freq * Return: zero for success and non-zero in case of a failure. */ -static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up) +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_clk_info *clki; - unsigned long core_clk_rate = 0; + unsigned long clk_freq = 0; u32 core_clk_cycles_per_us; /* @@ -561,22 +644,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up) if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) return 0; + if (hba->use_pm_opp && freq != ULONG_MAX) { + clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk"); + if (clk_freq) + goto cfg_timers; + } + list_for_each_entry(clki, &hba->clk_list_head, list) { if (!strcmp(clki->name, "core_clk")) { + if (freq == ULONG_MAX) { + clk_freq = clki->max_freq; + break; + } + if (is_pre_scale_up) - core_clk_rate = clki->max_freq; + clk_freq = clki->max_freq; else - core_clk_rate = clk_get_rate(clki->clk); + clk_freq = clk_get_rate(clki->clk); break; } } +cfg_timers: /* If frequency is smaller than 1MHz, set to 1MHz */ - if (core_clk_rate < DEFAULT_CLK_RATE_HZ) - core_clk_rate = DEFAULT_CLK_RATE_HZ; + if (clk_freq < DEFAULT_CLK_RATE_HZ) + clk_freq = DEFAULT_CLK_RATE_HZ; - core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; + core_clk_cycles_per_us = clk_freq / USEC_PER_SEC; if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); /* @@ -596,13 +691,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: - if (ufs_qcom_cfg_timers(hba, false)) { + if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) { dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); return -EINVAL; } - err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX); + err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX); if (err) dev_err(hba->dev, "cfg core clk ctrl failed\n"); /* @@ -637,26 +732,17 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; if (status == PRE_CHANGE) return 0; - if (ufs_qcom_is_link_off(hba)) { - /* - * Disable 
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
+ if (!ufs_qcom_is_link_active(hba))
 ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
-
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
+ /* reset the connected UFS device during power down */
+ if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ ufs_qcom_device_reset_ctrl(hba, true);
 
 return ufs_qcom_ice_suspend(host);
}
@@ -664,26 +750,11 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
 int err;
 
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
 
 return ufs_qcom_ice_resume(host);
}
@@ -874,17 +945,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 break;
 case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
-
 /* cache the power mode parameters to use internally */
 memcpy(&host->dev_req_params,
 dev_req_params, sizeof(*dev_req_params));
 
@@ -1073,12 +1133,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
+ * There are certain clocks which comes from the PHY so it needs
+ * to be managed together along with controller clocks which also
+ * provides a better power saving. Hence keep phy_power_off/on calls
+ * in ufs_qcom_setup_clocks, so that PHY's regulators & clks can be
+ * turned on/off along with UFS's clocks.
+ *
 * Return: 0 on success, non-zero on failure.
 */
 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
 enum ufs_notify_change_status status)
{
 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy;
+ int err;
 
 /*
 * In case ufs_qcom_init() is not yet done, simply ignore.
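Editorial aside, not part of the patch: the ufs_qcom_cfg_timers() hunk earlier in this file's diff programs REG_UFS_SYS1CLK_1US with the number of core_clk cycles per microsecond, clamped to a 1 MHz floor so the register value cannot truncate to zero. A minimal self-contained sketch of that arithmetic (constants mirror the driver's values; the helper name is illustrative):

/*
 * Illustrative sketch of the SYS1CLK_1US value derived in
 * ufs_qcom_cfg_timers(); builds as plain userspace C.
 */
#include <stdio.h>

#define DEFAULT_CLK_RATE_HZ 1000000UL /* 1 MHz floor, as in the driver */
#define USEC_PER_SEC        1000000UL

static unsigned long sys1clk_1us(unsigned long clk_freq)
{
	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (clk_freq < DEFAULT_CLK_RATE_HZ)
		clk_freq = DEFAULT_CLK_RATE_HZ;

	return clk_freq / USEC_PER_SEC;
}

int main(void)
{
	printf("%lu\n", sys1clk_1us(300000000UL)); /* 300 MHz core_clk -> 300 cycles/us */
	printf("%lu\n", sys1clk_1us(19200000UL));  /* 19.2 MHz -> 19 (integer division) */
	printf("%lu\n", sys1clk_1us(150000UL));    /* sub-MHz rate clamps to 1 */
	return 0;
}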
@@ -1088,6 +1156,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
 if (!host)
 return 0;
 
+ phy = host->generic_phy;
+
 switch (status) {
 case PRE_CHANGE:
 if (on) {
@@ -1097,10 +1167,22 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
 /* disable device ref_clk */
 ufs_qcom_dev_ref_clk_ctrl(host, false);
 }
+
+ err = phy_power_off(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power off failed, ret=%d\n", err);
+ return err;
+ }
 }
 break;
 case POST_CHANGE:
 if (on) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power on failed, ret = %d\n", err);
+ return err;
+ }
+
 /* enable the device ref clock for HS mode*/
 if (ufshcd_is_hs_mode(&hba->pwr_info))
 ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1360,29 +1442,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
 
 return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
 }
 
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
{
 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 struct list_head *head = &hba->clk_list_head;
 struct ufs_clk_info *clki;
 u32 cycles_in_1us = 0;
 u32 core_clk_ctrl_reg;
+ unsigned long clk_freq;
 int err;
 
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ if (clk_freq) {
+ cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+ goto set_core_clk_ctrl;
+ }
+ }
+
 list_for_each_entry(clki, head, list) {
 if (!IS_ERR_OR_NULL(clki->clk) &&
 !strcmp(clki->name, "core_clk_unipro")) {
- if (!clki->max_freq)
+ if (!clki->max_freq) {
 cycles_in_1us = 150; /* default for backwards compatibility */
- else if (freq == ULONG_MAX)
+ break;
+ }
+
+ if (freq == ULONG_MAX) {
 cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
- else
- cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+ break;
+ }
+
+ if (is_scale_up)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+ else
+ cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
 break;
 }
 }
 
+set_core_clk_ctrl:
 err = ufshcd_dme_get(hba,
 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), &core_clk_ctrl_reg);
@@ -1419,13 +1518,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
{
 int ret;
 
- ret = ufs_qcom_cfg_timers(hba, true);
+ ret = ufs_qcom_cfg_timers(hba, true, freq);
 if (ret) {
 dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
 return ret;
 }
 
 /* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
}
 
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1456,8 +1555,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
+ int ret;
+
+ ret = ufs_qcom_cfg_timers(hba, false, freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+ return ret;
+ }
 /* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
}
 
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
@@ -1609,6 +1715,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 
 return 0;
}
 
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int i, j, nminor = 0, testbus_len = 0;
+ u32 *testbus __free(kfree) = NULL;
+ char *prefix;
+
+ testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ for (j = 0; j < TSTBUS_MAX; j++) {
+ nminor = testbus_info[j].nminor;
+ prefix = testbus_info[j].prefix;
+ host->testbus.select_major = j;
+ testbus_len = nminor * sizeof(u32);
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ }
+}
+
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix, enum ufshcd_res id)
+{
+ u32 *regs __free(kfree) = NULL;
+ size_t pos;
+
+ if (offset % 4 != 0 || len % 4 != 0)
+ return -EINVAL;
+
+ regs = kzalloc(len, GFP_ATOMIC);
+ if (!regs)
+ return -ENOMEM;
+
+ for (pos = 0; pos < len; pos += 4)
+ regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, regs, len, false);
+
+ return 0;
+}
+
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+ struct dump_info {
+ size_t offset;
+ size_t len;
+ const char *prefix;
+ enum ufshcd_res id;
+ };
+
+ struct dump_info mcq_dumps[] = {
+ {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+ {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+ {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+ {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+ {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+ {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+ {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+ {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+ {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+ {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+ {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+ ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+ mcq_dumps[i].prefix, mcq_dumps[i].id);
+ cond_resched();
+ }
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
 u32 reg;
@@ -1616,6 +1801,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 
 host = ufshcd_get_variant(hba);
 
+ dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+ dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+ dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+ ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
 "HCI Vendor Specific Registers ");
 
@@ -1658,6 +1852,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 
 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
 ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+ if (hba->mcq_enabled) {
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+ }
+
+ /* ensure below dumps occur only in task context due to blocking calls. */
+ if (in_task()) {
+ /* Dump MCQ Host Vendor Specific Registers */
+ if (hba->mcq_enabled)
+ ufs_qcom_dump_mcq_hci_regs(hba);
+
+ /* voluntarily yield the CPU as we are dumping too much data */
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+ cond_resched();
+ ufs_qcom_dump_testbus(hba);
+ }
}
 
/**
@@ -1687,7 +1898,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
 return 0;
}
 
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
 struct devfreq_dev_profile *p,
 struct devfreq_simple_ondemand_data *d)
@@ -1699,13 +1909,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
 hba->clk_scaling.suspend_on_no_request = true;
}
 
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
 
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
@@ -1849,25 +2052,38 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 ufshcd_mcq_config_esi(hba, msg);
}
 
+struct ufs_qcom_irq {
+ unsigned int irq;
+ unsigned int idx;
+ struct ufs_hba *hba;
+};
+
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct msi_desc *desc = data;
- struct device *dev = msi_desc_to_dev(desc);
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 id = desc->msi_index;
- struct ufs_hw_queue *hwq = &hba->uhq[id];
+ struct ufs_qcom_irq *qi = data;
+ struct ufs_hba *hba = qi->hba;
+ struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
 
- ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
 ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 return IRQ_HANDLED;
}
 
+static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
+{
+ for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
+ devm_free_irq(q->hba->dev, q->irq, q->hba);
+
+ platform_device_msi_free_irqs_all(uqi->hba->dev);
+ devm_kfree(uqi->hba->dev, uqi);
+}
+
+DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
+
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
 int nr_irqs, ret;
 
 if (host->esi_enabled)
@@ -1878,6 +2094,14 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 * 2. Poll queues do not need ESI.
 */
 nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+
+ struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
+ devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+ /* Preset so __free() has a pointer to hba in all error paths */
+ qi[0].hba = hba;
+
 ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
 ufs_qcom_write_msi_msg);
 if (ret) {
@@ -1885,48 +2109,79 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 return ret;
 }
 
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- ret = devm_request_irq(hba->dev, desc->irq,
- ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", desc);
+ for (int idx = 0; idx < nr_irqs; idx++) {
+ qi[idx].irq = msi_get_virq(hba->dev, idx);
+ qi[idx].idx = idx;
+ qi[idx].hba = hba;
+
+ ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", qi + idx);
 if (ret) {
 dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
- __func__, desc->irq, ret);
- failed_desc = desc;
- break;
+ __func__, qi[idx].irq, ret);
+ qi[idx].irq = 0;
+ return ret;
 }
 }
- msi_unlock_descs(hba->dev);
 
- if (ret) {
- /* Rewind */
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (desc == failed_desc)
- break;
- devm_free_irq(hba->dev, desc->irq, hba);
+ retain_and_null_ptr(qi);
+
+ if (host->hw_ver.major >= 6) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
+ }
+ ufshcd_mcq_enable_esi(hba);
+ host->esi_enabled = true;
+ return 0;
+}
+
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name)
+{
+ struct ufs_clk_info *clki;
+ struct dev_pm_opp *opp;
+ unsigned long clk_freq;
+ int idx = 0;
+ bool found = false;
+
+ opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+ return 0;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, name)) {
+ found = true;
+ break;
 }
- msi_unlock_descs(hba->dev);
- platform_device_msi_free_irqs_all(hba->dev);
- } else {
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0)
- ufshcd_rmwl(hba, ESI_VEC_MASK,
- FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
- REG_UFS_CFG3);
- ufshcd_mcq_enable_esi(hba);
- host->esi_enabled = true;
+
+ idx++;
 }
 
- return ret;
+ if (!found) {
+ dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+ dev_pm_opp_put(opp);
+ return 0;
+ }
+
+ clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+ dev_pm_opp_put(opp);
+
+ return clk_freq;
}
 
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
- u32 gear = 0;
+ u32 gear = UFS_HS_DONT_CHANGE;
+ unsigned long unipro_freq;
 
- switch (freq) {
+ if (!hba->use_pm_opp)
+ return gear;
+
+ unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ switch (unipro_freq) {
 case 403000000:
 gear = UFS_HS_G5;
 break;
@@ -1946,10 +2201,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 break;
 default:
 dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
- break;
+ return UFS_HS_DONT_CHANGE;
 }
 
- return gear;
+ return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
 
/*
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 05d4cb569c50..e0e129af7c16 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -24,6 +24,15 @@
 #define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
 
+/* bit and mask definitions for PA_VS_CLK_CFG_REG attribute */
+#define PA_VS_CLK_CFG_REG 0x9004
+#define PA_VS_CLK_CFG_REG_MASK GENMASK(8, 0)
+
+/* bit and mask definitions for DL_VS_CLK_CFG attribute */
+#define DL_VS_CLK_CFG 0xA00B
+#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
+
 /* QCOM UFS host controller vendor specific registers */
 enum {
 REG_UFS_SYS1CLK_1US = 0xC0,
@@ -50,6 +59,8 @@ enum {
 */
 UFS_AH8_CFG = 0xFC,
 
+ UFS_RD_REG_MCQ = 0xD00,
+
 REG_UFS_MEM_ICE_CONFIG = 0x260C,
 REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
 
@@ -75,6 +86,15 @@ enum {
 UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
 };
 
+/* QCOM UFS HC vendor specific Hibern8 count registers */
+enum {
+ REG_UFS_HW_H8_ENTER_CNT = 0x2700,
+ REG_UFS_SW_H8_ENTER_CNT = 0x2704,
+ REG_UFS_SW_AFTER_HW_H8_ENTER_CNT = 0x2708,
+ REG_UFS_HW_H8_EXIT_CNT = 0x270C,
+ REG_UFS_SW_H8_EXIT_CNT = 0x2710,
+};
+
 enum {
 UFS_MEM_CQIS_VS = 0x8,
 };
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 996387906aa1..b39239f641f2 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -22,17 +22,12 @@
 
 #define MAX_SUPP_MAC 64
 
-struct ufs_host {
- void (*late_init)(struct ufs_hba *hba);
-};
-
 enum intel_ufs_dsm_func_id {
 INTEL_DSM_FNS = 0,
 INTEL_DSM_RESET = 1,
 };
 
 struct intel_host {
- struct ufs_host ufs_host;
 u32 dsm_fns;
 u32 active_ltr;
 u32 idle_ltr;
@@ -408,8 +403,14 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
 return ufs_intel_common_init(hba);
 }
 
-static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
 {
+ int err;
+
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ err = ufs_intel_common_init(hba);
 /* LKF always needs a full reset, so set PM accordingly */
 if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
 hba->spm_lvl = UFS_PM_LVL_6;
@@ -418,19 +419,6 @@ static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
 hba->spm_lvl = UFS_PM_LVL_5;
 hba->rpm_lvl = UFS_PM_LVL_5;
 }
-}
-
-static int ufs_intel_lkf_init(struct ufs_hba *hba)
-{
- struct ufs_host *ufs_host;
- int err;
-
- hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- hba->caps |= UFSHCD_CAP_CRYPTO;
- err = ufs_intel_common_init(hba);
- ufs_host = ufshcd_get_variant(hba);
- ufs_host->late_init = ufs_intel_lkf_late_init;
 return err;
 }
 
@@ -444,6 +432,8 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
 
 static int ufs_intel_mtl_init(struct ufs_hba *hba)
 {
+ hba->rpm_lvl = UFS_PM_LVL_2;
+ hba->spm_lvl = UFS_PM_LVL_2;
 hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
 return ufs_intel_common_init(hba);
 }
@@ -574,7 +564,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
 static int
 ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
- struct ufs_host *ufs_host;
 struct ufs_hba *hba;
 void __iomem *mmio_base;
 int err;
@@ -607,10 +596,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 return err;
 }
 
- ufs_host = ufshcd_get_variant(hba);
- if (ufs_host && ufs_host->late_init)
- ufs_host->late_init(hba);
-
 pm_runtime_put_noidle(&pdev->dev);
 pm_runtime_allow(&pdev->dev);
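Editorial aside, not part of the patch: ufs_qcom_config_esi() in the ufs-qcom.c hunks above leans on the kernel's scope-based cleanup helpers from <linux/cleanup.h> (DEFINE_FREE(), __free(), retain_and_null_ptr()): the qi vector table is released automatically on every early return, and the success path disarms the cleanup by nulling the pointer. A minimal sketch of that control flow using the compiler's __attribute__((cleanup)); names and the simplified free routine are illustrative, and ownership handling is deliberately reduced compared with the devm-managed original:

/*
 * Illustrative sketch of the cleanup/retain pattern used in
 * ufs_qcom_config_esi(); builds as plain userspace C (GCC/Clang).
 */
#include <stdio.h>
#include <stdlib.h>

struct irq_entry_sketch {
	unsigned int irq;
};

/* Called automatically when the annotated variable goes out of scope. */
static void irq_table_free(struct irq_entry_sketch **p)
{
	if (!*p)
		return; /* disarmed: the success path kept the table */
	printf("error path: freeing IRQ table\n");
	free(*p);
}

static int config_esi_sketch(int fail_at_request)
{
	/* freed on every early return unless set to NULL first */
	struct irq_entry_sketch *qi
		__attribute__((cleanup(irq_table_free))) =
		calloc(4, sizeof(*qi));
	if (!qi)
		return -1;

	if (fail_at_request)
		return -1; /* cleanup handler releases qi here */

	/*
	 * Success: disarm the cleanup, the rough equivalent of
	 * retain_and_null_ptr(qi). In the driver the allocation is
	 * devm-managed; this sketch simply stops tracking it.
	 */
	qi = NULL;
	return 0;
}

int main(void)
{
	config_esi_sketch(1); /* prints the error-path message */
	config_esi_sketch(0); /* silent: cleanup was disarmed */
	return 0;
}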