Diffstat (limited to 'drivers/ufs/core')
 drivers/ufs/core/ufs-mcq.c       |  18
 drivers/ufs/core/ufs-sysfs.c     | 197
 drivers/ufs/core/ufs_bsg.c       |   6
 drivers/ufs/core/ufs_trace.h     | 135
 drivers/ufs/core/ufshcd-crypto.c |   7
 drivers/ufs/core/ufshcd-priv.h   |  22
 drivers/ufs/core/ufshcd.c        | 425
 7 files changed, 600 insertions(+), 210 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..1e50675772fe 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -674,16 +674,8 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
- unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +684,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
@@ -715,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
return FAILED;
}
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
return SUCCESS;
}
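
The two hunks above trade the early ufshcd_cmd_inflight() test, and the racy re-check under cq_lock at the tail, for a single NULL check on the hardware queue: once a command has completed, its request no longer maps to an MCQ hardware queue, so a NULL return from ufshcd_mcq_req_to_hwq() is the "already completed" signal. A minimal sketch of the resulting flow (error prints and the SQE-nullification middle elided; helper names as in the file above):

	int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
	{
		struct ufs_hba *hba = shost_priv(cmd->device->host);
		int tag = scsi_cmd_to_rq(cmd)->tag;
		struct ufshcd_lrb *lrbp = &hba->lrb[tag];
		struct ufs_hw_queue *hwq;

		if (lrbp->req_abort_skip)
			return FAILED;		/* earlier aborts already failed */

		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
		if (!hwq)
			return FAILED;		/* command already completed */

		/* ... nullify the SQE, or fall back to ufshcd_try_to_abort_task() ... */
		return SUCCESS;
	}
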
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3438269a5440..de8b6acd4058 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
}
}
+static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
+{
+ switch (hint) {
+ case WB_RESIZE_HINT_KEEP:
+ return "keep";
+ case WB_RESIZE_HINT_DECREASE:
+ return "decrease";
+ case WB_RESIZE_HINT_INCREASE:
+ return "increase";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
+{
+ switch (status) {
+ case WB_RESIZE_STATUS_IDLE:
+ return "idle";
+ case WB_RESIZE_STATUS_IN_PROGRESS:
+ return "in_progress";
+ case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
+ return "complete_success";
+ case WB_RESIZE_STATUS_GENERAL_FAILURE:
+ return "general_failure";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+static const char * const wb_resize_en_mode[] = {
+ [WB_RESIZE_EN_IDLE] = "idle",
+ [WB_RESIZE_EN_DECREASE] = "decrease",
+ [WB_RESIZE_EN_INCREASE] = "increase",
+};
+
+static ssize_t wb_resize_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ mode = sysfs_match_string(wb_resize_en_mode, buf);
+ if (mode < 0)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_set_resize_en(hba, mode);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
/**
* pm_qos_enable_show - sysfs handler to show pm qos enable value
* @dev: device associated with the UFS controller
@@ -458,6 +526,64 @@ static ssize_t pm_qos_enable_store(struct device *dev,
return count;
}
+static ssize_t critical_health_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->critical_health_count);
+}
+
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int value;
+
+ if (kstrtouint(buf, 0, &value))
+ return -EINVAL;
+
+ /* the only supported usecase is to reset the dev_lvl_exception_count */
+ if (value)
+ return -EINVAL;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+
+ return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u64 exception_id;
+ int err;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ return err;
+
+ hba->dev_lvl_exception_id = exception_id;
+ return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -468,8 +594,12 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
+static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -482,8 +612,12 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_wb_resize_enable.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
+ &dev_attr_critical_health.attr,
+ &dev_attr_device_lvl_exception_count.attr,
+ &dev_attr_device_lvl_exception_id.attr,
NULL
};
@@ -1485,6 +1619,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
+static int wb_read_resize_attrs(struct ufs_hba *hba,
+ enum attr_idn idn, u32 *attr_val)
+{
+ u8 index = 0;
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ index = ufshcd_wb_get_query_index(hba);
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ idn, index, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t wb_resize_hint_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_hint);
+
+static ssize_t wb_resize_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_status);
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -1558,6 +1753,8 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
+ &dev_attr_wb_resize_hint.attr,
+ &dev_attr_wb_resize_status.attr,
NULL,
};
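
The new WriteBooster resize controls are plain sysfs files: wb_resize_enable (write-only, in the controller's default attribute group) accepts idle/decrease/increase, while wb_resize_hint and wb_resize_status (read-only, in the "attributes" group) report the device's recommendation and progress. A hypothetical userspace sketch; the HBA path below is a placeholder since the real directory is platform-specific:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Placeholder: the real path depends on the platform/bus topology. */
	#define HBA "/sys/devices/platform/soc/ufshc"

	int main(void)
	{
		char status[32] = "unknown";
		ssize_t n;
		int fd;

		/* Request a buffer increase; valid writes: idle/decrease/increase. */
		fd = open(HBA "/wb_resize_enable", O_WRONLY);
		if (fd >= 0) {
			if (write(fd, "increase", 8) < 0)
				perror("wb_resize_enable");
			close(fd);
		}

		/* Poll the device-reported resize status. */
		fd = open(HBA "/attributes/wb_resize_status", O_RDONLY);
		if (fd >= 0) {
			n = read(fd, status, sizeof(status) - 1);
			if (n > 0)
				status[n] = '\0';
			close(fd);
		}
		printf("wb_resize_status: %s", status);
		return 0;
	}
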
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 8d4ad0a3f2cf..252186124669 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -194,10 +194,12 @@ out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
- job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
- if (ret == 0)
+ if (ret == 0) {
+ job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+ sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ }
return ret;
}
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 84deca2b841d..caa32e23ffa5 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -83,34 +83,34 @@ UFS_CMD_TRACE_TSF_TYPES
TRACE_EVENT(ufshcd_clk_gating,
- TP_PROTO(const char *dev_name, int state),
+ TP_PROTO(struct ufs_hba *hba, int state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->state = state;
),
TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
);
TRACE_EVENT(ufshcd_clk_scaling,
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
+ TP_PROTO(struct ufs_hba *hba, const char *state, const char *clk,
u32 prev_state, u32 curr_state),
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+ TP_ARGS(hba, state, clk, prev_state, curr_state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
__string(clk, clk)
__field(u32, prev_state)
@@ -118,7 +118,7 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
__assign_str(clk);
__entry->prev_state = prev_state;
@@ -126,80 +126,80 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
+ dev_name(__entry->hba->dev), __get_str(state), __get_str(clk),
__entry->prev_state, __entry->curr_state)
);
TRACE_EVENT(ufshcd_auto_bkops_state,
- TP_PROTO(const char *dev_name, const char *state),
+ TP_PROTO(struct ufs_hba *hba, const char *state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
),
TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
+ dev_name(__entry->hba->dev), __get_str(state))
);
DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err),
+ TP_ARGS(hba, profile_info, time_us, err),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(profile_info, profile_info)
__field(s64, time_us)
__field(int, err)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(profile_info);
__entry->time_us = time_us;
__entry->err = err;
),
TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
+ dev_name(__entry->hba->dev), __get_str(profile_info),
__entry->time_us, __entry->err)
);
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+ TP_ARGS(hba, err, usecs, dev_state, link_state),
TP_STRUCT__entry(
__field(s64, usecs)
__field(int, err)
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, dev_state)
__field(int, link_state)
),
@@ -207,14 +207,14 @@ DECLARE_EVENT_CLASS(ufshcd_template,
TP_fast_assign(
__entry->usecs = usecs;
__entry->err = err;
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->dev_state = dev_state;
__entry->link_state = link_state;
),
TP_printk(
"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__entry->usecs,
__print_symbolic(__entry->dev_state, UFS_PWR_MODES),
__print_symbolic(__entry->link_state, UFS_LINK_STATES),
@@ -223,60 +223,62 @@ DECLARE_EVENT_CLASS(ufshcd_template,
);
DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
- TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+ TP_PROTO(struct scsi_device *sdev, struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
u32 intr, u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ TP_ARGS(sdev, hba, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
opcode, group_id),
TP_STRUCT__entry(
__field(struct scsi_device *, sdev)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
@@ -290,6 +292,7 @@ TRACE_EVENT(ufshcd_command,
TP_fast_assign(
__entry->sdev = sdev;
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
@@ -312,13 +315,13 @@ TRACE_EVENT(ufshcd_command,
);
TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, u32 cmd,
u32 arg1, u32 arg2, u32 arg3),
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+ TP_ARGS(hba, str_t, cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(u32, cmd)
__field(u32, arg1)
@@ -327,7 +330,7 @@ TRACE_EVENT(ufshcd_uic_command,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->cmd = cmd;
__entry->arg1 = arg1;
@@ -337,19 +340,19 @@ TRACE_EVENT(ufshcd_uic_command,
TP_printk(
"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, void *hdr,
void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
+ TP_ARGS(hba, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
@@ -357,7 +360,7 @@ TRACE_EVENT(ufshcd_upiu,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
@@ -366,7 +369,7 @@ TRACE_EVENT(ufshcd_upiu,
TP_printk(
"%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
@@ -375,22 +378,22 @@ TRACE_EVENT(ufshcd_upiu,
TRACE_EVENT(ufshcd_exception_event,
- TP_PROTO(const char *dev_name, u16 status),
+ TP_PROTO(struct ufs_hba *hba, u16 status),
- TP_ARGS(dev_name, status),
+ TP_ARGS(hba, status),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(u16, status)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->status = status;
),
TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
+ dev_name(__entry->hba->dev), __entry->status
)
);
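
Every event above switches from __string(dev_name, dev_name) to a raw struct ufs_hba * field: the ring buffer then stores one pointer per entry instead of a copied string, and dev_name() is resolved lazily in TP_printk when the buffer is read (on the assumption that the hba outlives any consumer of the buffer, which holds for a registered host). The pattern in isolation, with a hypothetical event name:

	TRACE_EVENT(example_hba_event,
		TP_PROTO(struct ufs_hba *hba, int state),
		TP_ARGS(hba, state),

		TP_STRUCT__entry(
			__field(struct ufs_hba *, hba)	/* was: __string(dev_name, dev_name) */
			__field(int, state)
		),

		TP_fast_assign(
			__entry->hba = hba;		/* was: __assign_str(dev_name); */
			__entry->state = state;
		),

		/* dev_name() runs at read/print time, not at trace time */
		TP_printk("%s: state %d", dev_name(__entry->hba->dev), __entry->state)
	);
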
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 694ff7578fc1..9e63a9d3cb7e 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -72,11 +72,11 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
ufshcd_program_key(hba, &cfg, slot);
@@ -185,6 +185,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
hba->crypto_profile.max_dun_bytes_supported = 8;
+ hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
hba->crypto_profile.dev = hba->dev;
/*
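
The rename from key->raw to key->bytes, plus the explicit key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW, tracks the blk-crypto split between raw and hardware-wrapped key types. The XTS branch exists because blk-crypto stores the two AES-XTS subkeys back to back, while the UFS crypto configuration expects each subkey in its own half of the fixed-size key area; a worked illustration for AES-256-XTS (sizes assumed: key->size == 64, UFS_CRYPTO_KEY_MAX_SIZE == 64):

	u8 cfg_key[UFS_CRYPTO_KEY_MAX_SIZE];			/* split in halves */

	memcpy(cfg_key, key->bytes, key->size / 2);		/* subkey 1 -> bytes 0..31  */
	memcpy(cfg_key + UFS_CRYPTO_KEY_MAX_SIZE / 2,		/* subkey 2 -> bytes 32..63 */
	       key->bytes + key->size / 2, key->size / 2);
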
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 786f20ef2238..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -117,11 +118,12 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
- bool up, enum ufs_notify_change_status status)
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
+ return hba->vops->clk_scale_notify(hba, up, target_freq, status);
return 0;
}
@@ -159,9 +161,9 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
}
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
if (hba->vops && hba->vops->pwr_change_notify)
return hba->vops->pwr_change_notify(hba, status,
@@ -270,6 +272,14 @@ static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ if (hba->vops && hba->vops->freq_to_gear_speed)
+ return hba->vops->freq_to_gear_speed(hba, freq);
+
+ return 0;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
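
ufshcd_vops_freq_to_gear_speed() lets the core derive the target gear directly from the clock frequency chosen by devfreq (see ufshcd_devfreq_scale() in ufshcd.c below); returning 0 keeps the legacy two-point gear scaling. A hypothetical host-driver implementation, with invented frequency/gear pairs:

	static u32 example_freq_to_gear(struct ufs_hba *hba, unsigned long freq)
	{
		/* Frequencies below are illustrative, not from any real SoC. */
		switch (freq) {
		case 403000000:
			return UFS_HS_G5;
		case 300000000:
			return UFS_HS_G4;
		case 201500000:
			return UFS_HS_G3;
		default:
			return UFS_HS_G2;
		}
	}

	static const struct ufs_hba_variant_ops example_hba_vops = {
		/* ... other callbacks ... */
		.freq_to_gear_speed	= example_freq_to_gear,
	};
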
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 1893a7ad9531..50adfb8b335b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -53,7 +53,7 @@
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
- UIC_CMD_TIMEOUT_MAX = 2000,
+ UIC_CMD_TIMEOUT_MAX = 5000,
};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
@@ -63,7 +63,11 @@ enum {
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+ QUERY_REQ_TIMEOUT_MIN = 1,
+ QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+ QUERY_REQ_TIMEOUT_MAX = 30000
+};
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout,
- "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+ QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+ .set = dev_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+ "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
@@ -266,7 +287,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -278,6 +299,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -369,7 +391,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
else
header = &hba->lrb[tag].ucd_rsp_ptr->header;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
+ trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -380,7 +402,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
&rq_rsp->qr, UFS_TSF_OSF);
}
@@ -393,12 +415,12 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
return;
if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_req.req_header,
&descp->upiu_req.input_param1,
UFS_TSF_TM_INPUT);
else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_rsp.rsp_header,
&descp->upiu_rsp.output_param1,
UFS_TSF_TM_OUTPUT);
@@ -418,7 +440,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
+ trace_ufshcd_uic_command(hba, str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
@@ -431,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
- int hwq_id = -1;
+ u32 hwq_id = 0;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -473,7 +495,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
- trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+ trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
transfer_len, intr, lba, opcode, group_id);
}
@@ -628,8 +650,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -643,9 +665,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- div_u64(hba->ufs_stats.last_intr_ts, 1000),
- hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -1063,7 +1082,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->max_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
@@ -1081,7 +1100,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->min_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
@@ -1122,7 +1141,7 @@ int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
return ret;
}
- trace_ufshcd_clk_scaling(dev_name(dev),
+ trace_ufshcd_clk_scaling(hba,
(scaling_down ? "scaled down" : "scaled up"),
clki->name, hba->clk_scaling.target_freq, freq);
}
@@ -1162,7 +1181,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
int ret = 0;
ktime_t start = ktime_get();
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
if (ret)
goto out;
@@ -1173,7 +1192,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
if (ret)
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
@@ -1186,7 +1205,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
ufshcd_pm_qos_update(hba, scale_up);
out:
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -1313,16 +1332,26 @@ out:
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
+ * @target_gear: target gear to scale to
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
+ if (target_gear) {
+ new_pwr_info = hba->pwr_info;
+ new_pwr_info.gear_tx = target_gear;
+ new_pwr_info.gear_rx = target_gear;
+
+ goto config_pwr_mode;
+ }
+
+ /* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -1343,6 +1372,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
}
+config_pwr_mode:
/* check if the power mode needs to be changed or not? */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
@@ -1367,6 +1397,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
+ mutex_lock(&hba->host->scan_mutex);
blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1377,6 +1408,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
goto out;
}
@@ -1387,17 +1419,18 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
- /* Enable Write Booster if we have scaled up else disable it */
+ /* Enable Write Booster if current gear requires it else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
- ufshcd_wb_toggle(hba, scale_up);
+ ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
ufshcd_release(hba);
}
@@ -1413,15 +1446,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{
+ u32 old_gear = hba->pwr_info.gear_rx;
+ u32 new_gear = 0;
int ret = 0;
+ new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
+
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
+ ret = ufshcd_scale_gear(hba, new_gear, false);
if (ret)
goto out_unprepare;
}
@@ -1429,13 +1466,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
- ufshcd_scale_gear(hba, true);
+ ufshcd_scale_gear(hba, old_gear, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
+ ret = ufshcd_scale_gear(hba, new_gear, true);
if (ret) {
ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
@@ -1444,7 +1481,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
}
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ ufshcd_clock_scaling_unprepare(hba, ret);
return ret;
}
@@ -1548,7 +1585,7 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ret)
hba->clk_scaling.target_freq = *freq;
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -1720,6 +1757,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_info *clki;
+ unsigned long freq;
u32 value;
int err = 0;
@@ -1743,14 +1782,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value) {
ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
+ goto out_rel;
}
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ freq = clki->max_freq;
+
+ ufshcd_suspend_clkscaling(hba);
+
+ if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
+ goto out_rel;
+
+ err = ufshcd_devfreq_scale(hba, freq, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ else
+ hba->clk_scaling.target_freq = freq;
+
+out_rel:
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
@@ -1783,6 +1833,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
+ if (!hba->clk_scaling.wb_gear)
+ /* Use intermediate gear speed HS_G3 as the default wb_gear */
+ hba->clk_scaling.wb_gear = UFS_HS_G3;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1881,7 +1935,7 @@ start:
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
break;
}
@@ -1893,7 +1947,7 @@ start:
fallthrough;
case CLKS_OFF:
hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
@@ -1933,7 +1987,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.is_suspended ||
hba->clk_gating.state != REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1955,7 +2009,7 @@ static void ufshcd_gate_work(struct work_struct *work)
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1980,7 +2034,7 @@ static void ufshcd_gate_work(struct work_struct *work)
guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
}
@@ -2006,7 +2060,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
}
hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
@@ -3144,16 +3198,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
int err;
retry:
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
time_left);
if (likely(time_left)) {
- /*
- * The completion handler called complete() and the caller of
- * this function still owns the @lrbp tag so the code below does
- * not trigger any race conditions.
- */
- hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3167,7 +3215,6 @@ retry:
/* successfully cleared the command, retry if needed */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
err = -EAGAIN;
- hba->dev_cmd.complete = NULL;
return err;
}
@@ -3183,11 +3230,9 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending) {
- hba->dev_cmd.complete = NULL;
+ if (pending)
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- }
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3205,8 +3250,6 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending)
- hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3240,13 +3283,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
int err;
- hba->dev_cmd.complete = &wait;
-
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -3347,7 +3386,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
+ int timeout = dev_cmd_timeout;
BUG_ON(!hba);
@@ -3444,7 +3483,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3540,7 +3579,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -4005,7 +4044,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_reset(struct ufs_hba *hba)
+int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_RESET,
@@ -4019,6 +4058,7 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -4044,7 +4084,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_enable(struct ufs_hba *hba)
+int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_ENABLE,
@@ -4058,6 +4098,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4422,7 +4463,7 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ trace_ufshcd_profile_hibern8(hba, "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret)
@@ -4447,7 +4488,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ trace_ufshcd_profile_hibern8(hba, "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
@@ -5551,12 +5592,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (hba->dev_cmd.complete) {
+ } else {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
- complete(hba->dev_cmd.complete);
+ complete(&hba->dev_cmd.complete);
}
}
@@ -5658,6 +5699,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -5808,7 +5851,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+ trace_ufshcd_auto_bkops_state(hba, "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -5859,7 +5902,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ trace_ufshcd_auto_bkops_state(hba, "Disabled");
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
@@ -5979,6 +6022,42 @@ out:
__func__, err);
}
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+ struct utp_upiu_query_v4_0 *upiu_resp;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+
+ ufshcd_init_query(hba, &request, &response,
+ UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
+
+ if (err) {
+ dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+ *exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ return err;
+}
+
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6049,7 +6128,22 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
@@ -6131,15 +6225,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
}
/*
- * The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
* by checking only the available buffer. The threshold
* defined here is > 90% full.
* With user-space preserved enabled, the current-buffer
* should be checked too because the wb buffer size can reduce
* when disk tends to be full. This info is provided by current
- * buffer (dCurrentWriteBoosterBufferSize). There's no point in
- * keeping vcc on when current buffer is empty.
+ * buffer (dCurrentWriteBoosterBufferSize).
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6154,7 +6246,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+ return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6193,7 +6285,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
return;
}
- trace_ufshcd_exception_event(dev_name(hba->dev), status);
+ trace_ufshcd_exception_event(hba, status);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
@@ -6201,6 +6293,16 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+ if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
+ hba->critical_health_count++;
+ sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
+ }
+
+ if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+ atomic_inc(&hba->dev_lvl_exception_count);
+ sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
@@ -6506,7 +6608,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
@@ -6521,9 +6623,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6935,7 +7042,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
@@ -6943,16 +7050,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status = 0;
+ u32 last_intr_status, intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = local_clock();
+ last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -6976,7 +7081,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
- hba->ufs_stats.last_intr_status,
+ last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -6984,6 +7089,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is deferred to the threaded handler
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+
+ /* Move interrupt handling to thread when MCQ & ESI are not enabled */
+ if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Directly handle interrupts since the MCQ ESI handlers do the hard work */
+ return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+}
+
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
@@ -7179,7 +7307,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -7199,8 +7327,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
return err;
}
@@ -7652,7 +7778,7 @@ static void ufshcd_process_probe_result(struct ufs_hba *hba,
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- trace_ufshcd_init(dev_name(hba->dev), ret,
+ trace_ufshcd_init(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), probe_start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
}
@@ -7681,7 +7807,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, ULONG_MAX, true);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba);
@@ -8043,6 +8170,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->ext_wb_sup = get_unaligned_be16(desc_buf +
+ DEVICE_DESC_PARAM_EXT_WB_SUP);
+
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
@@ -8100,6 +8230,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u32 ext_ufs_feature;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+ return;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+ ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8293,8 +8439,15 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (dev_info->wspecversion >= 0x410) {
+ hba->critical_health_count = 0;
+ ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
+ }
+
ufs_init_rtc(hba, desc_buf);
+ ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8384,6 +8537,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8394,6 +8572,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -8563,7 +8744,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8678,6 +8859,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
+ hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
@@ -8882,7 +9064,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -9148,12 +9330,12 @@ out:
} else if (!ret && on && hba->clk_gating.is_initialized) {
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_gating(hba,
(on ? "on" : "off"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -9853,7 +10035,7 @@ static int ufshcd_wl_runtime_suspend(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9873,7 +10055,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9905,7 +10087,7 @@ static int ufshcd_wl_suspend(struct device *dev)
out:
if (!ret)
hba->is_sys_suspended = true;
- trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9928,7 +10110,7 @@ static int ufshcd_wl_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
- trace_ufshcd_wl_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
@@ -9966,7 +10148,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
@@ -10039,7 +10221,7 @@ int ufshcd_system_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_system_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10067,7 +10249,7 @@ int ufshcd_system_resume(struct device *dev)
ret = ufshcd_resume(hba);
out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_system_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -10093,7 +10275,7 @@ int ufshcd_runtime_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10120,7 +10302,7 @@ int ufshcd_runtime_resume(struct device *dev)
ret = ufshcd_resume(hba);
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10431,6 +10613,23 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
spin_lock_init(&hba->clk_gating.lock);
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
+ init_completion(&hba->dev_cmd.complete);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10522,7 +10721,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+ IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;
@@ -10544,21 +10744,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM if
- * not set by the host controller drivers.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- if (!hba->rpm_lvl)
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- if (!hba->spm_lvl)
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
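
The IRQ registration above switches to devm_request_threaded_irq(): with MCQ and ESI active the primary handler completes everything in hard-IRQ context, otherwise it returns IRQ_WAKE_THREAD and the old polling loop runs as the threaded half, with IRQF_ONESHOT keeping the line masked until the thread finishes. The same split in a generic sketch with invented names:

	struct demo_dev {
		bool fast_ok;
		/* ... */
	};

	static irqreturn_t demo_hardirq(int irq, void *data)
	{
		struct demo_dev *d = data;

		if (!d->fast_ok)
			return IRQ_WAKE_THREAD;	/* defer to demo_threaded() */

		/* cheap, non-sleeping work is fine in hard-IRQ context */
		return IRQ_HANDLED;
	}

	static irqreturn_t demo_threaded(int irq, void *data)
	{
		/* may sleep: runs in a dedicated kernel thread */
		return IRQ_HANDLED;
	}

	/* in probe(): */
	err = devm_request_threaded_irq(dev, irq, demo_hardirq, demo_threaded,
					IRQF_ONESHOT | IRQF_SHARED, "demo", d);
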