author    Linus Torvalds <torvalds@linux-foundation.org> 2016-12-14 21:49:33 +0300
committer Linus Torvalds <torvalds@linux-foundation.org> 2016-12-14 21:49:33 +0300
commit    a829a8445f09036404060f4d6489cb13433f4304 (patch)
tree      60067e1425239a9f372c10100ede39691c3d612b /drivers/scsi/ufs
parent    84b6079134420f4635f23c2088a3892057b23bb0 (diff)
parent    f5b893c947151d424a4ab55ea3a8544b81974b31 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  lpfc, hisi_sas, megaraid_sas, ufs, ibmvscsis, mpt3sas).

  There's also an assortment of minor fixes, mostly in error legs or
  other not very user visible stuff. The major change is the
  pci_alloc_irq_vectors replacement for the old pci_msix_.. calls; this
  effectively makes IRQ mapping generic for the drivers and allows
  blk_mq to use the information"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (256 commits)
  scsi: qla4xxx: switch to pci_alloc_irq_vectors
  scsi: hisi_sas: support deferred probe for v2 hw
  scsi: megaraid_sas: switch to pci_alloc_irq_vectors
  scsi: scsi_devinfo: remove synchronous ALUA for NETAPP devices
  scsi: be2iscsi: set errno on error path
  scsi: be2iscsi: set errno on error path
  scsi: hpsa: fallback to use legacy REPORT PHYS command
  scsi: scsi_dh_alua: Fix RCU annotations
  scsi: hpsa: use %phN for short hex dumps
  scsi: hisi_sas: fix free'ing in probe and remove
  scsi: isci: switch to pci_alloc_irq_vectors
  scsi: ipr: Fix runaway IRQs when falling back from MSI to LSI
  scsi: dpt_i2o: double free on error path
  scsi: cxlflash: Migrate scsi command pointer to AFU command
  scsi: cxlflash: Migrate IOARRIN specific routines to function pointers
  scsi: cxlflash: Cleanup queuecommand()
  scsi: cxlflash: Cleanup send_tmf()
  scsi: cxlflash: Remove AFU command lock
  scsi: cxlflash: Wait for active AFU commands to timeout upon tear down
  scsi: cxlflash: Remove private command pool
  ...
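Most of the "switch to pci_alloc_irq_vectors" commits in the list follow one
pattern. As a minimal sketch (hypothetical driver fields; not code taken from
this merge), the old driver-managed MSI-X setup:

	struct msix_entry entries[4];
	int i, ret;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;
	ret = pci_enable_msix_exact(pdev, entries, 4);	/* MSI-X only */

collapses into a single call that also covers MSI and legacy INTx fallback:

	ret = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;
	/* the i-th vector is now simply pci_irq_vector(pdev, i) */

Because the PCI core then owns the vector setup, blk-mq can derive its queue
mapping from the same information, which is the point of the conversion.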
Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c       |  39
-rw-r--r--  drivers/scsi/ufs/ufs.h            |   5
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h     |   9
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c     |   2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c  |   5
-rw-r--r--  drivers/scsi/ufs/ufshcd.c         | 482
-rw-r--r--  drivers/scsi/ufs/ufshcd.h         |  46
-rw-r--r--  drivers/scsi/ufs/ufshci.h         |   3
-rw-r--r--  drivers/scsi/ufs/unipro.h         |   4
9 files changed, 449 insertions, 146 deletions
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f1131..aa43bfea0d00 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,10 +1094,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* ufs_qcom_setup_clocks - enable/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks; else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
*
* Returns 0 on success, non-zero on failure.
*/
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1111,18 +1113,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (!host)
return 0;
- if (on) {
- err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
- if (err)
- goto out;
+ if (on && (status == POST_CHANGE)) {
+ phy_power_on(host->generic_phy);
- err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
- if (err) {
- dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- goto out;
- }
/* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1123,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
- } else {
-
- /* M-PHY RMMI interface clocks can be turned off */
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
+ } else if (!on && (status == PRE_CHANGE)) {
+ if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
+ /* powering off PHY during aggressive clk gating */
+ phy_power_off(host->generic_phy);
+ }
+
vote = host->bus_vote.min_bw_vote;
}
@@ -1146,7 +1140,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "%s: set bus vote failed %d\n",
__func__, err);
-out:
return err;
}
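The hunk above converts ufs-qcom to the new two-phase setup_clocks contract:
the core now invokes the variant hook both before and after it touches the
clock list (see the __ufshcd_setup_clocks change further down). In simplified
form (a sketch of the ordering, not the verbatim core code):

	ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);	/* vendor pre-hook */
	/* ... enable or disable every clock on hba->clk_list_head ... */
	ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);	/* vendor post-hook */

This lets ufs-qcom power the PHY on only once the clocks are running
(on && POST_CHANGE) and power it off while they are still running
(!on && PRE_CHANGE).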
@@ -1204,12 +1197,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out;
+ goto out_variant_clear;
}
err = ufs_qcom_bus_register(host);
if (err)
- goto out_host_free;
+ goto out_variant_clear;
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1247,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
+ ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1267,7 @@ out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
-out_host_free:
- devm_kfree(dev, host);
+out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
return err;
@@ -1287,6 +1279,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
+ phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874e2977..8e6709a3fb6b 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
};
enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
QUERY_DESC_UNIT_MAX_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[18];
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e9253a..f7983058f3f7 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,6 +128,13 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
+
+
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
@@ -140,6 +147,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59..52b546fb509b 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
}
/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38da864..a72a4ba78125 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
- goto out_free;
+ goto out;
}
vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
-out_free:
- devm_kfree(dev, vreg);
- vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index cf549871c1ee..ef8548c3a423 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
#include "ufs_quirks.h"
#include "unipro.h"
+#define UFSHCD_REQ_SENSE_SIZE 18
+
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -123,6 +119,7 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
+ UFSHCD_STATE_EH_SCHEDULED,
};
/* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba)) {
+ devfreq_suspend_device(hba->devfreq);
+ hba->clk_scaling.window_start_t = 0;
+ }
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ if (ufshcd_is_clkscaling_enabled(hba))
+ devfreq_resume_device(hba->devfreq);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
scsi_unblock_requests(hba->host);
}
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we exit hibern8 state in addition to the
+ * clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+ * In case you are here to cancel this work, the gating state
+ * would be marked as REQ_CLKS_ON. In this case, save time by
+ * skipping the gating work and exiting after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state == REQ_CLKS_ON)) {
hba->clk_gating.state = CLKS_ON;
goto rel_lock;
}
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
+ ufshcd_suspend_clkscaling(hba);
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
}
/**
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*
* Returns 0 in case of success, non-zero value in case of failure
*/
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufshcd_sg_entry *prd_table;
struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16)(sg_segments *
+ sizeof(struct ufshcd_sg_entry)));
+ else
+ lrbp->utr_descriptor_ptr->prd_table_length =
+ cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
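A worked example of the quirk above: for a request with 4 scatter-gather
segments, a host with UFSHCD_QUIRK_PRDT_BYTE_GRAN set programs
prd_table_length = 4 * sizeof(struct ufshcd_sg_entry) = 64 (the PRDT length
in bytes, assuming the usual 16-byte entry of four 32-bit words), while a
spec-compliant host keeps the plain entry count of 4.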
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
+ case UFSHCD_STATE_EH_SCHEDULED:
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_comp_scsi_upiu(hba, lrbp);
- err = ufshcd_map_sg(lrbp);
+ err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
msecs_to_jiffies(max_timeout));
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
hba->dev_cmd.complete = NULL;
if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
- timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode, idn, index, err);
goto out_unlock;
}
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
desc_id, desc_index, 0, desc_buf,
&buff_len);
- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
- ufs_query_desc_max_size[desc_id])
- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
- __func__, desc_id, param_offset, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * While reading variable size descriptors (like string descriptor),
+ * some UFS devices may report the "LENGTH" (field in "Transaction
+ * Specific fields" of Query Response UPIU) same as what was requested
+ * in Query Request UPIU instead of reporting the actual size of the
+ * variable size descriptor.
+ * It is safe to ignore the "LENGTH" field for variable size descriptors,
+ * as we can always derive the length of the descriptor from the
+ * descriptor header fields. Hence this change imposes the length match
+ * check only for fixed size descriptors (for which we always request the
+ * correct size as part of the Query Request UPIU).
+ */
+ if ((desc_id != QUERY_DESC_IDN_STRING) &&
+ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
u8 *buf,
u32 size)
{
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ int err = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+ if (!err)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+
+ return err;
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
- utrdlp[i].response_upiu_offset =
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+ utrdlp[i].response_upiu_offset =
+ cpu_to_le16(response_offset);
+ utrdlp[i].prd_table_offset =
+ cpu_to_le16(prdt_offset);
+ utrdlp[i].response_upiu_length =
+ cpu_to_le16(ALIGNED_UPIU_SIZE);
+ } else {
+ utrdlp[i].response_upiu_offset =
cpu_to_le16((response_offset >> 2));
- utrdlp[i].prd_table_offset =
+ utrdlp[i].prd_table_offset =
cpu_to_le16((prdt_offset >> 2));
- utrdlp[i].response_upiu_length =
+ utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+ }
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- retries);
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel), ret);
} while (ret && peer && --retries);
- if (!retries)
+ if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel), retries);
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
int ret;
struct uic_command uic_cmd = {0};
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+ POST_CHANGE);
return ret;
}
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
struct uic_command uic_cmd = {0};
int ret;
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
ret = ufshcd_link_recovery(hba);
- }
+ } else
+ ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+ POST_CHANGE);
return ret;
}
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FASTAUTO_MODE;
- pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_rx);
return -EINVAL;
}
- pwr_info->pwr_rx = SLOWAUTO_MODE;
+ pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_tx);
return -EINVAL;
}
- pwr_info->pwr_tx = SLOWAUTO_MODE;
+ pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If the UFS device isn't active, we will have to issue link startup
+ * twice to make sure the device state moves to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
int ret = 0;
u8 lun_qdepth;
+ int retries;
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Read descriptor */
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
+ if (!ret || ret == -ENOTSUPP)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+ }
/* Some WLUN doesn't support unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ /* Ignore LINERESET indication, as this is not an error */
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked, but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
schedule_work(&hba->eh_work);
}
}
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
task_req_upiup->input_param1 = cpu_to_be32(lun_id);
task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
wmb();
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -4722,6 +4856,24 @@ out:
return icc_level;
}
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+ int ret = 0;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* write attribute */
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+ if (!ret)
+ break;
+
+ dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_set_icc_levels_attr(hba,
+ hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
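A standalone worked example of the arithmetic above (userspace sketch with
made-up attribute values; gran_to_us mirrors the function's table):

	#include <stdio.h>

	/* PA_GRANULARITY index -> microseconds per PA_TACTIVATE unit */
	static const unsigned gran_to_us[] = {1, 4, 8, 16, 32, 100};

	int main(void)
	{
		unsigned host_gran = 3, host_tact = 2;	/* 2 * 8us = 16us */
		unsigned peer_gran = 1, peer_tact = 10;	/* 10 * 1us = 10us */
		unsigned host_us = host_tact * gran_to_us[host_gran - 1];
		unsigned peer_us = peer_tact * gran_to_us[peer_gran - 1];

		if (host_us > peer_us)	/* host must stay strictly lower */
			peer_tact = host_us / gran_to_us[peer_gran - 1] + 1;

		printf("new peer PA_TACTIVATE = %u\n", peer_tact);	/* 17 */
		return 0;
	}

Here the device ends up at 17us of PA_TACTIVATE against the host's 16us,
satisfying the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE requirement.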
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
}
/**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__);
} else {
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ goto out;
+ }
}
/* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->is_init_prefetch = true;
/* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
out:
/*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+ if (ret)
+ return ret;
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on);
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+ if (ret)
+ return ret;
+
out:
if (ret) {
list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
if (!hba->vops)
return;
- ufshcd_vops_setup_clocks(hba, false);
-
ufshcd_vops_setup_regulators(hba, false);
ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -5577,19 +5808,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- SCSI_SENSE_BUFFERSIZE,
+ UFSHCD_REQ_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- SCSI_SENSE_BUFFERSIZE, NULL,
+ UFSHCD_REQ_SENSE_SIZE, NULL,
msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
if (!ret && !ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
if (ret)
goto vccq_lpm;
}
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
+ ufshcd_suspend_clkscaling(hba);
+
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto out;
+ goto enable_gating;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto out;
+ goto enable_gating;
}
if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6121,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
disable_clks:
/*
- * The clock scaling needs access to controller registers. Hence, Wait
- * for pending clock scaling work to be done before clocks are
- * turned off.
- */
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
- /*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
* host clocks are ON.
@@ -5905,10 +6129,6 @@ disable_clks:
if (ret)
goto set_link_active;
- ret = ufshcd_vops_setup_clocks(hba, false);
- if (ret)
- goto vops_resume;
-
if (!ufshcd_is_link_active(hba))
ufshcd_setup_clocks(hba, false);
else
@@ -5925,9 +6145,8 @@ disable_clks:
ufshcd_hba_vreg_set_lpm(hba);
goto out;
-vops_resume:
- ufshcd_vops_resume(hba, pm_op);
set_link_active:
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+ ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
out:
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
+ ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if (!hba || !hba->is_powered)
return 0;
- if (pm_runtime_suspended(hba->dev)) {
- if (hba->rpm_lvl == hba->spm_lvl)
- /*
- * There is possibility that device may still be in
- * active state during the runtime suspend.
- */
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
- goto out;
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+ if (pm_runtime_suspended(hba->dev)) {
/*
* UFS device and/or UFS link low power states during runtime
* suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
- else
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
- scsi_host_put(hba->host);
-
ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_remove_device(hba->devfreq);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool release_clk_hold = false;
+ unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON)) {
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /* hold the vote until the scaling work is completed */
+ hba->clk_gating.active_reqs++;
+ release_clk_hold = true;
+ hba->clk_gating.state = CLKS_ON;
+ } else {
+ /*
+ * Clock gating work seems to be running in parallel
+ * hence skip scaling work to avoid deadlock between
+ * current scaling work and gating work.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (release_clk_hold)
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
return err;
}
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+ hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
"simple_ondemand", NULL);
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ ufshcd_suspend_clkscaling(hba);
}
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
/*
- * The device-initialize-sequence hasn't been invoked yet.
- * Set the device to power-off state
+ * We are assuming that the device wasn't put into sleep/power-down
+ * state exclusively during the boot stage before the kernel started.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
*/
- ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
@@ -6530,7 +6785,6 @@ exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
- scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef111293..7d9ff22acfea 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
* @pwr_change_notify: called before and after a power mode change
* is carried out to allow vendor specific capabilities
* to be set.
+ * @setup_xfer_req: called before any transfer request is issued
+ * to allow vendor specific setup for the request
+ * @setup_task_mgmt: called before any task management request is issued
+ * to allow vendor specific setup for the request
+ * @hibern8_notify: called before and after hibern8 enter/exit
+ * to allow vendor specific configuration
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool);
+ int (*setup_clocks)(struct ufs_hba *, bool,
+ enum ufs_notify_change_status);
int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
@@ -283,6 +290,10 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
+ void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
+ void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+ enum ufs_notify_change_status);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -474,6 +485,12 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ /*
+ * This quirk needs to be enabled if the host controller regards the
+ * values of PRDTO and PRDTL in the UTRD as having byte resolution.
+ */
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
@@ -755,10 +772,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
return 0;
}
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on);
+ return hba->vops->setup_clocks(hba, on, status);
return 0;
}
@@ -799,6 +817,28 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
return -ENOTSUPP;
}
+static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd)
+{
+ if (hba->vops && hba->vops->setup_xfer_req)
+ return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+ int tag, u8 tm_function)
+{
+ if (hba->vops && hba->vops->setup_task_mgmt)
+ return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ if (hba->vops && hba->vops->hibern8_notify)
+ return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
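A hypothetical variant driver would wire the new hooks like this (a sketch
only; the my_* names are illustrative, not part of the patch):

	static void my_setup_xfer_req(struct ufs_hba *hba, int tag,
				      bool is_scsi_cmd)
	{
		/* program vendor specific registers for slot 'tag' */
	}

	static void my_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
				      enum ufs_notify_change_status status)
	{
		/* PRE_CHANGE fires before DME_HIBER_ENTER/EXIT, POST_CHANGE after */
	}

	static struct ufs_hba_variant_ops my_vops = {
		.setup_xfer_req	= my_setup_xfer_req,
		.hibern8_notify	= my_hibern8_notify,
	};

Hooks left NULL are simply skipped by the ufshcd_vops_*() wrappers above.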
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741ff606..5d978867be57 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -83,6 +83,8 @@ enum {
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
};
+#define UFS_MASK(mask, offset) ((mask) << (offset))
+
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
@@ -166,6 +168,7 @@ enum {
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b5675575..23129d7b2678 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
#define PA_MAXRXHSGEAR 0x1587
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
#define PA_PACPREQTIMEOUT 0x1590
#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
@@ -158,6 +159,9 @@
#define VS_DEBUGOMC 0xD09E
#define VS_POWERSTATE 0xD083
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4