Diffstat (limited to 'drivers/ufs/core')
 drivers/ufs/core/ufs-mcq.c     |  18
 drivers/ufs/core/ufs-sysfs.c   | 384
 drivers/ufs/core/ufshcd-priv.h |   1
 drivers/ufs/core/ufshcd.c      | 423
 4 files changed, 691 insertions(+), 135 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..1e50675772fe 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -674,16 +674,8 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
- unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +684,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
@@ -715,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
return FAILED;
}
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
return SUCCESS;
}
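With this change, ufshcd_mcq_abort() no longer calls ufshcd_cmd_inflight() up front and no longer releases the command itself at the end: a NULL return from ufshcd_mcq_req_to_hwq() is now the signal that the command already completed, and releasing the command is left to the normal completion/error-handling path. A minimal sketch of the resulting caller pattern (names as in this diff, error reporting reduced):

    /* Sketch: resolve the hardware queue first; a NULL result means the
     * command already completed, so there is nothing left to abort.
     */
    struct ufs_hw_queue *hwq =
        ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

    if (!hwq)
        return FAILED;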
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 90b5ab60f5ae..4bd7d491e3c5 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -5,6 +5,7 @@
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>
+#include <linux/string_choices.h>
#include <ufs/ufs.h>
#include <ufs/unipro.h>
@@ -57,6 +58,53 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
}
}
+static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
+{
+ switch (hint) {
+ case WB_RESIZE_HINT_KEEP:
+ return "keep";
+ case WB_RESIZE_HINT_DECREASE:
+ return "decrease";
+ case WB_RESIZE_HINT_INCREASE:
+ return "increase";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
+{
+ switch (status) {
+ case WB_RESIZE_STATUS_IDLE:
+ return "idle";
+ case WB_RESIZE_STATUS_IN_PROGRESS:
+ return "in_progress";
+ case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
+ return "complete_success";
+ case WB_RESIZE_STATUS_GENERAL_FAILURE:
+ return "general_failure";
+ default:
+ return "unknown";
+ }
+}
+
+static const char * const ufs_hid_states[] = {
+ [HID_IDLE] = "idle",
+ [ANALYSIS_IN_PROGRESS] = "analysis_in_progress",
+ [DEFRAG_REQUIRED] = "defrag_required",
+ [DEFRAG_IN_PROGRESS] = "defrag_in_progress",
+ [DEFRAG_COMPLETED] = "defrag_completed",
+ [DEFRAG_NOT_REQUIRED] = "defrag_not_required",
+};
+
+static const char *ufs_hid_state_to_string(enum ufs_hid_state state)
+{
+ if (state < NUM_UFS_HID_STATES)
+ return ufs_hid_states[state];
+
+ return "unknown";
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -411,6 +459,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+static const char * const wb_resize_en_mode[] = {
+ [WB_RESIZE_EN_IDLE] = "idle",
+ [WB_RESIZE_EN_DECREASE] = "decrease",
+ [WB_RESIZE_EN_INCREASE] = "increase",
+};
+
+static ssize_t wb_resize_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ mode = sysfs_match_string(wb_resize_en_mode, buf);
+ if (mode < 0)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_set_resize_en(hba, mode);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
/**
* pm_qos_enable_show - sysfs handler to show pm qos enable value
* @dev: device associated with the UFS controller
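The new write-only wb_resize_enable node accepts exactly the strings in wb_resize_en_mode[] ("idle", "decrease", "increase"); sysfs_match_string() compares with sysfs_streq(), so a trailing newline is tolerated. A hedged userspace sketch follows; the sysfs path is an assumption and varies with the platform device name:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Hypothetical controller path; the real name is platform-specific. */
    #define NODE "/sys/bus/platform/devices/1d84000.ufshc/wb_resize_enable"

    int main(void)
    {
        int fd = open(NODE, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Request a WriteBooster buffer increase; "decrease" also works. */
        if (write(fd, "increase", strlen("increase")) < 0)
            perror("write");
        close(fd);
        return 0;
    }

Per the checks above, the write fails with EOPNOTSUPP unless WriteBooster is enabled, user-space preservation is on, and the device reports UFS_DEV_WB_BUF_RESIZE support.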
@@ -466,6 +552,56 @@ static ssize_t critical_health_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int value;
+
+ if (kstrtouint(buf, 0, &value))
+ return -EINVAL;
+
+ /* the only supported use case is to reset the dev_lvl_exception_count */
+ if (value)
+ return -EINVAL;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+
+ return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u64 exception_id;
+ int err;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ return err;
+
+ hba->dev_lvl_exception_id = exception_id;
+ return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
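device_lvl_exception_count and device_lvl_exception_id are meant to be used together: poll the count, read the ID (which issues a query to the device), then write 0 to re-arm the counter. Since the exception handler calls sysfs_notify() on the count node, userspace can also poll(2) it instead of re-reading. A hedged userspace sketch (paths assumed, error handling reduced):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Hypothetical sysfs directory for the UFS host controller. */
    #define UFS "/sys/bus/platform/devices/1d84000.ufshc"

    int main(void)
    {
        char buf[32];
        int fd, n;

        fd = open(UFS "/device_lvl_exception_count", O_RDONLY);
        if (fd < 0)
            return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        close(fd);
        if (n <= 0)
            return 1;
        buf[n] = '\0';

        if (atol(buf) > 0) {
            fd = open(UFS "/device_lvl_exception_id", O_RDONLY);
            if (fd >= 0) {
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0)
                    printf("exception id: %.*s", n, buf);
                close(fd);
            }
            fd = open(UFS "/device_lvl_exception_count", O_WRONLY);
            if (fd >= 0) {
                write(fd, "0", 1);      /* only 0 is accepted */
                close(fd);
            }
        }
        return 0;
    }

Both nodes return -EOPNOTSUPP on devices below UFS 4.1 (wspecversion < 0x410).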
@@ -476,9 +612,12 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -491,9 +630,12 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_wb_resize_enable.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
+ &dev_attr_device_lvl_exception_count.attr,
+ &dev_attr_device_lvl_exception_id.attr,
NULL
};
@@ -1375,7 +1517,7 @@ static ssize_t _name##_show(struct device *dev, \
ret = -EINVAL; \
goto out; \
} \
- ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
+ ret = sysfs_emit(buf, "%s\n", str_true_false(flag)); \
out: \
up(&hba->host_sem); \
return ret; \
@@ -1495,6 +1637,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
+static int wb_read_resize_attrs(struct ufs_hba *hba,
+ enum attr_idn idn, u32 *attr_val)
+{
+ u8 index = 0;
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ index = ufshcd_wb_get_query_index(hba);
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ idn, index, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t wb_resize_hint_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_hint);
+
+static ssize_t wb_resize_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_status);
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -1568,6 +1771,8 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
+ &dev_attr_wb_resize_hint.attr,
+ &dev_attr_wb_resize_status.attr,
NULL,
};
@@ -1576,6 +1781,178 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
.attrs = ufs_sysfs_attributes,
};
+static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u32 *attr_val)
+{
+ int ret;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t analysis_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ if (sysfs_streq(buf, "enable"))
+ mode = HID_ANALYSIS_ENABLE;
+ else if (sysfs_streq(buf, "disable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_WO(analysis_trigger);
+
+static ssize_t defrag_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ if (sysfs_streq(buf, "enable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_ENABLE;
+ else if (sysfs_streq(buf, "disable"))
+ mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_WO(defrag_trigger);
+
+static ssize_t fragmented_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static DEVICE_ATTR_RO(fragmented_size);
+
+static ssize_t defrag_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static ssize_t defrag_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_HID_SIZE, &value);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(defrag_size);
+
+static ssize_t progress_ratio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static DEVICE_ATTR_RO(progress_ratio);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int ret;
+
+ ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_HID_STATE, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value));
+}
+
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *ufs_sysfs_hid[] = {
+ &dev_attr_analysis_trigger.attr,
+ &dev_attr_defrag_trigger.attr,
+ &dev_attr_fragmented_size.attr,
+ &dev_attr_defrag_size.attr,
+ &dev_attr_progress_ratio.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return hba->dev_info.hid_sup ? attr->mode : 0;
+}
+
+static const struct attribute_group ufs_sysfs_hid_group = {
+ .name = "hid",
+ .attrs = ufs_sysfs_hid,
+ .is_visible = ufs_sysfs_hid_is_visible,
+};
+
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
@@ -1590,6 +1967,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_string_descriptors_group,
&ufs_sysfs_flags_group,
&ufs_sysfs_attributes_group,
+ &ufs_sysfs_hid_group,
NULL,
};
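ufs_sysfs_hid_is_visible() uses the standard sysfs gating idiom: the hid group is always listed in ufs_sysfs_groups[], and per-attribute visibility is decided at registration time by returning either attr->mode or 0, which avoids conditionally calling sysfs_create_group() after probe. A reduced sketch of the idiom with hypothetical names:

    struct example_hba {
        bool feature_supported;
    };

    static struct attribute *example_attrs[] = {
        /* &dev_attr_foo.attr, ... */
        NULL,
    };

    static umode_t example_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
    {
        struct device *dev = kobj_to_dev(kobj);
        struct example_hba *hba = dev_get_drvdata(dev);

        /* 0 hides the attribute; attr->mode keeps its declared mode. */
        return hba->feature_supported ? attr->mode : 0;
    }

    static const struct attribute_group example_group = {
        .name       = "example",
        .attrs      = example_attrs,
        .is_visible = example_is_visible,
    };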
@@ -1621,7 +1999,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
-UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
+UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
@@ -1638,7 +2016,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_logical_block_count.attr,
&dev_attr_erase_block_size.attr,
&dev_attr_provisioning_type.attr,
- &dev_attr_physical_memory_resourse_count.attr,
+ &dev_attr_physical_memory_resource_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
&dev_attr_wb_buf_alloc_units.attr,
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 10b4a19a70f1..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0534390c2a35..efd7a811a002 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -53,7 +53,7 @@
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
- UIC_CMD_TIMEOUT_MAX = 2000,
+ UIC_CMD_TIMEOUT_MAX = 5000,
};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
@@ -63,7 +63,11 @@ enum {
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+ QUERY_REQ_TIMEOUT_MIN = 1,
+ QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+ QUERY_REQ_TIMEOUT_MAX = 30000
+};
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout,
- "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+ QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+ .set = dev_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+ "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
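Because dev_cmd_timeout is registered with mode 0644, it can also be changed at runtime through the module's parameters directory in sysfs; param_set_uint_minmax() rejects writes outside [QUERY_REQ_TIMEOUT_MIN, QUERY_REQ_TIMEOUT_MAX]. A conceptual, stand-alone sketch of the clamping it performs (hypothetical helper, not a kernel API):

    /* Approximately what param_set_uint_minmax() enforces here: parse an
     * unsigned int and reject anything outside [min, max] with -EINVAL.
     */
    static int set_clamped_uint(const char *val, unsigned int min,
                                unsigned int max, unsigned int *out)
    {
        unsigned int v;
        int ret = kstrtouint(val, 0, &v);

        if (ret)
            return ret;
        if (v < min || v > max)
            return -EINVAL;
        *out = v;
        return 0;
    }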
@@ -278,6 +299,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -342,6 +364,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val | intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val & ~intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
@@ -431,7 +481,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
- int hwq_id = -1;
+ u32 hwq_id = 0;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -643,9 +693,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- div_u64(hba->ufs_stats.last_intr_ts, 1000),
- hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -1378,6 +1425,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
+ mutex_lock(&hba->host->scan_mutex);
blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1388,6 +1436,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
goto out;
}
@@ -1409,6 +1458,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
ufshcd_release(hba);
}
@@ -2544,7 +2594,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Return: 0 only if success.
+ * Return: 0 if successful; < 0 upon failure.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
@@ -2574,6 +2624,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ unsigned long flags;
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2583,6 +2634,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2660,32 +2715,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set |= intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set &= ~intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
 * header according to the request
* @hba: per adapter instance
@@ -2804,8 +2833,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(ucd_req_ptr + 1, query->descriptor, len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -2818,8 +2845,6 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
.task_tag = lrbp->task_tag,
};
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -2845,6 +2870,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
ret = -EINVAL;
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
return ret;
}
@@ -3052,6 +3079,9 @@ static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
hba->dev_cmd.type = cmd_type;
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
@@ -3164,9 +3194,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
@@ -3176,16 +3210,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
int err;
retry:
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
time_left);
if (likely(time_left)) {
- /*
- * The completion handler called complete() and the caller of
- * this function still owns the @lrbp tag so the code below does
- * not trigger any race conditions.
- */
- hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3199,7 +3227,6 @@ retry:
/* successfully cleared the command, retry if needed */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
err = -EAGAIN;
- hba->dev_cmd.complete = NULL;
return err;
}
@@ -3215,11 +3242,9 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending) {
- hba->dev_cmd.complete = NULL;
+ if (pending)
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- }
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3237,8 +3262,6 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending)
- hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3252,6 +3275,7 @@ retry:
}
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3269,16 +3293,15 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
ufshcd_release(hba);
}
+/*
+ * Return: 0 upon success; < 0 upon failure.
+ */
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
int err;
- hba->dev_cmd.complete = &wait;
-
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -3360,6 +3383,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
+ WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@@ -3371,7 +3395,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 for success; < 0 upon failure.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3379,7 +3403,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
+ int timeout = dev_cmd_timeout;
BUG_ON(!hba);
@@ -3427,6 +3451,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3439,7 +3464,7 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
@@ -3476,7 +3501,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3488,6 +3513,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3502,7 +3528,7 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 for success; < 0 upon failure.
*/
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
@@ -3525,9 +3551,13 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
+ WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
+/*
+ * Return: 0 if successful; < 0 upon failure.
+ */
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
@@ -3572,7 +3602,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3585,6 +3615,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
ufshcd_dev_man_unlock(hba);
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3601,7 +3632,7 @@ out_unlock:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
- * Return: 0 for success, non-zero in case of failure.
+ * Return: 0 for success; < 0 upon failure.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3619,6 +3650,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
break;
}
+ WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3631,7 +3663,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 in case of success; < 0 upon failure.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3698,6 +3730,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
out:
if (is_kmalloc)
kfree(desc_buf);
+ WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@@ -3811,7 +3844,7 @@ out:
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success, non-zero otherwise.
+ * Return: 0 in case of success; < 0 upon failure.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
@@ -4247,6 +4280,30 @@ out:
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
+ * ufshcd_dme_rmw - get, modify and set a DME attribute
+ * @hba: per adapter instance
+ * @mask: indicates which bits to clear from the value that has been read
+ * @val: actual value to write
+ * @attr: dme attribute
+ */
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+ u32 val, u32 attr)
+{
+ u32 cfg = 0;
+ int err;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+ if (err)
+ return err;
+
+ cfg &= ~mask;
+ cfg |= (val & mask);
+
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_rmw);
+
+/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
*
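ufshcd_dme_rmw() is a small convenience wrapper: read the attribute with DME_GET, clear the bits in mask, merge in val (also masked), and write the result back with DME_SET. A hedged usage sketch; the attribute and bit values below are illustrative only, not taken from this patch:

    /* Set bits 1:0 of an attribute to 0b01, preserving all other bits.
     * PA_TACTIVATE serves purely as an example attribute here.
     */
    int err = ufshcd_dme_rmw(hba, 0x3, 0x1, PA_TACTIVATE);

    if (err)
        dev_warn(hba->dev, "DME read-modify-write failed: %d\n", err);

Note the read-modify-write is issued as two separate UIC commands, so callers should avoid concurrent writers to the same attribute.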
@@ -4268,7 +4325,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
- bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -4279,15 +4335,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
goto out_unlock;
}
hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- reenable_intr = true;
- }
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
@@ -4331,9 +4379,7 @@ out:
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- if (ret) {
+ if (ret && !hba->pm_op_in_progress) {
ufshcd_set_link_broken(hba);
ufshcd_schedule_eh_work(hba);
}
@@ -4341,6 +4387,14 @@ out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+ /*
+ * If the hibern8 exit fails during runtime resume, the resume becomes
+ * stuck and cannot be recovered through the error handler. To fix
+ * this, use link recovery instead of the error handler.
+ */
+ if (ret && hba->pm_op_in_progress)
+ ret = ufshcd_link_recovery(hba);
+
return ret;
}
@@ -4355,28 +4409,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
+ if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) ||
+ uic_cmd->command != UIC_CMD_DME_SET)
+ return ufshcd_send_uic_cmd(hba, uic_cmd);
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
ufshcd_hold(hba);
-
- if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
- uic_cmd->command == UIC_CMD_DME_SET) {
- ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
- goto out;
- }
-
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
- if (!ret)
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
- mutex_unlock(&hba->uic_cmd_mutex);
-
-out:
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
ufshcd_release(hba);
+
return ret;
}
@@ -4789,7 +4832,7 @@ out:
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
- * Return: 0 on success, non-zero value on failure.
+ * Return: 0 if successful; < 0 upon failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
@@ -5585,12 +5628,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (hba->dev_cmd.complete) {
+ } else {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
- complete(hba->dev_cmd.complete);
+ complete(&hba->dev_cmd.complete);
}
}
@@ -5692,6 +5735,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -6013,6 +6058,42 @@ out:
__func__, err);
}
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+ struct utp_upiu_query_v4_0 *upiu_resp;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+
+ ufshcd_init_query(hba, &request, &response,
+ UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
+
+ if (err) {
+ dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+ *exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ return err;
+}
+
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6083,7 +6164,22 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
@@ -6165,15 +6261,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
}
/*
- * The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
* by checking only the available buffer. The threshold
* defined here is > 90% full.
* With user-space preserved enabled, the current-buffer
* should be checked too because the wb buffer size can reduce
* when disk tends to be full. This info is provided by current
- * buffer (dCurrentWriteBoosterBufferSize). There's no point in
- * keeping vcc on when current buffer is empty.
+ * buffer (dCurrentWriteBoosterBufferSize).
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6188,7 +6282,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+ return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6240,6 +6334,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
}
+ if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+ atomic_inc(&hba->dev_lvl_exception_count);
+ sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
@@ -6545,7 +6644,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
@@ -6560,9 +6659,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6974,7 +7078,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
@@ -6982,16 +7086,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status = 0;
+ u32 last_intr_status, intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = local_clock();
+ last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -7015,7 +7117,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
- hba->ufs_stats.last_intr_status,
+ last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -7023,6 +7125,34 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is moved to the threaded handler
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+ u32 intr_status, enabled_intr_status;
+
+ /* Defer to the threaded handler unless both MCQ and ESI are enabled */
+ if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+ return IRQ_WAKE_THREAD;
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
+ /* Directly handle interrupts since the MCQ ESI handlers do the hard work */
+ return ufshcd_sl_intr(hba, enabled_intr_status);
+}
+
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
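The IRQ path is now split: ufshcd_intr() runs in hard-IRQ context and handles the interrupt inline only when both MCQ and ESI are enabled; otherwise it returns IRQ_WAKE_THREAD and ufshcd_threaded_intr() (the old retry loop) runs in a kernel thread, matching the devm_request_threaded_irq() registration at the end of this patch. A reduced sketch of the pattern with hypothetical names:

    struct my_dev {
        bool fast_path;                 /* e.g. MCQ and ESI both enabled */
    };

    static irqreturn_t my_hard_irq(int irq, void *data)
    {
        struct my_dev *d = data;

        if (!d->fast_path)
            return IRQ_WAKE_THREAD;     /* defer to my_thread_irq() */
        /* hard-IRQ context: short, non-sleeping work only */
        return IRQ_HANDLED;
    }

    static irqreturn_t my_thread_irq(int irq, void *data)
    {
        /* runs in a kernel thread and may sleep */
        return IRQ_HANDLED;
    }

    /* At probe time; IRQF_ONESHOT keeps the line masked until the
     * threaded handler returns.
     */
    err = devm_request_threaded_irq(dev, irq, my_hard_irq, my_thread_irq,
                                    IRQF_ONESHOT | IRQF_SHARED, "my_dev", d);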
@@ -7218,7 +7348,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7238,8 +7368,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
return err;
}
@@ -7720,7 +7848,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, ULONG_MAX, true);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba);
@@ -8082,6 +8211,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->ext_wb_sup = get_unaligned_be16(desc_buf +
+ DEVICE_DESC_PARAM_EXT_WB_SUP);
+
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
@@ -8139,6 +8271,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u32 ext_ufs_feature;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+ return;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+ ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8313,6 +8461,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
+ dev_info->hid_sup = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP) &
+ UFS_DEV_HID_SUPPORT;
+
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@@ -8339,6 +8491,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
+ ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8428,6 +8582,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by one unit, i.e. by 100 µs.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8438,6 +8617,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -8607,7 +8789,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8722,6 +8904,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
+ hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
@@ -10338,8 +10521,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = devm_add_action_or_reset(dev, ufshcd_devres_release,
host);
if (err)
- return dev_err_probe(dev, err,
- "failed to add ufshcd dealloc action\n");
+ return err;
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
@@ -10490,6 +10672,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ init_completion(&hba->dev_cmd.complete);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10581,7 +10765,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+ IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;