Diffstat (limited to 'drivers/ufs/core')
-rw-r--r--  drivers/ufs/core/Makefile          2
-rw-r--r--  drivers/ufs/core/ufs-mcq.c       431
-rw-r--r--  drivers/ufs/core/ufs_bsg.c       144
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h   109
-rw-r--r--  drivers/ufs/core/ufshcd.c        918
-rw-r--r--  drivers/ufs/core/ufshpb.c          4
6 files changed, 1318 insertions(+), 290 deletions(-)
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 62f38c5bf857..4d02e0f2de10 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
new file mode 100644
index 000000000000..31df052fbc41
--- /dev/null
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
+ *
+ * Authors:
+ * Asutosh Das <quic_asutoshd@quicinc.com>
+ * Can Guo <quic_cang@quicinc.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "ufshcd-priv.h"
+
+#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define UFS_MCQ_MIN_RW_QUEUES 2
+#define UFS_MCQ_MIN_READ_QUEUES 0
+#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
+#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define QUEUE_EN_OFFSET 31
+#define QUEUE_ID_OFFSET 16
+
+#define MAX_DEV_CMD_ENTRIES 2
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+#define MCQ_QCFG_SIZE 0x40
+#define MCQ_ENTRY_SIZE_IN_DWORD 8
+#define CQE_UCD_BA GENMASK_ULL(63, 7)
+
+static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops rw_queue_count_ops = {
+ .set = rw_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int rw_queues;
+module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
+MODULE_PARM_DESC(rw_queues,
+ "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
+
+static int read_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops read_queue_count_ops = {
+ .set = read_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int read_queues;
+module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
+MODULE_PARM_DESC(read_queues,
+ "Number of interrupt driven read queues used for read. Default value is 0");
+
+static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops poll_queue_count_ops = {
+ .set = poll_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int poll_queues = 1;
+module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues,
+ "Number of poll queues used for r/w. Default value is 1");
+
+/**
+ * ufshcd_mcq_config_mac - Set the Max Active Cmds (MAC).
+ * @hba: per adapter instance
+ * @max_active_cmds: maximum # of active commands to the device at any time.
+ *
+ * The controller won't send more than the max_active_cmds to the device at
+ * any time.
+ */
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
+{
+ u32 val;
+
+ val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
+ val &= ~MCQ_CFG_MAC_MASK;
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
+}
+
+/**
+ * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
+ * request would be issued.
+ * @hba: per adapter instance
+ * @req: pointer to the request to be issued
+ *
+ * Returns the hardware queue instance on which the request would
+ * be queued.
+ */
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req)
+{
+ u32 utag = blk_mq_unique_tag(req);
+ u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+
+ /* uhq[0] is used to serve device commands */
+ return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+}
+
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * Returns the queue depth on success, a negative error code on failure.
+ *
+ * MAC - Max. Active Commands of the Host Controller (HC).
+ * The HC won't send more than this many commands to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depth
+ * supported by the HC and the UFS device.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+ int mac;
+
+ /* Mandatory to implement get_hba_mac() */
+ mac = ufshcd_mcq_vops_get_hba_mac(hba);
+ if (mac < 0) {
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
+ }
+
+ WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+ /*
+ * max. value of bqueuedepth = 256, mac is host dependent.
+ * It is mandatory for UFS device to define bQueueDepth if
+ * shared queuing architecture is enabled.
+ */
+ return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
+
+static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
+{
+ int i;
+ u32 hba_maxq, rem, tot_queues;
+ struct Scsi_Host *host = hba->host;
+
+ hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+
+ tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
+ rw_queues;
+
+ if (hba_maxq < tot_queues) {
+ dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
+ tot_queues, hba_maxq);
+ return -EOPNOTSUPP;
+ }
+
+ rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+
+ if (rw_queues) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ rw_queues = num_possible_cpus();
+ }
+
+ if (poll_queues) {
+ hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_POLL];
+ }
+
+ if (read_queues) {
+ hba->nr_queues[HCTX_TYPE_READ] = read_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_READ];
+ }
+
+ if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
+ num_possible_cpus());
+
+ for (i = 0; i < HCTX_MAX_TYPES; i++)
+ host->nr_hw_queues += hba->nr_queues[i];
+
+ hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ return 0;
+}
+
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ size_t utrdl_size, cqe_size;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) *
+ hwq->max_entries;
+ hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+ &hwq->sqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->sqe_dma_addr) {
+ dev_err(hba->dev, "SQE allocation failed\n");
+ return -ENOMEM;
+ }
+
+ cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
+ hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+ &hwq->cqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->cqe_dma_addr) {
+ dev_err(hba->dev, "CQE allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Operation and runtime registers configuration */
+#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
+#define MCQ_OPR_OFFSET_n(p, i) \
+ (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
+
+static void __iomem *mcq_opr_base(struct ufs_hba *hba,
+ enum ufshcd_mcq_opr n, int i)
+{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];
+
+ return opr->base + opr->stride * i;
+}
+
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
+{
+ return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
+{
+ writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
+
+/*
+ * Current MCQ specification doesn't provide a Task Tag or its equivalent in
+ * the Completion Queue Entry. Find the Task Tag using an indirect method.
+ */
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct cq_entry *cqe)
+{
+ u64 addr;
+
+ /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
+ BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
+
+ /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
+ addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
+ hba->ucdl_dma_addr;
+
+ return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+}
+
+static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
+ int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+}
+
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs = 0;
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ while (!ufshcd_mcq_is_cq_empty(hwq)) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ completed_reqs++;
+ }
+
+ if (completed_reqs)
+ ufshcd_mcq_update_cq_head(hwq);
+
+ return completed_reqs;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
+
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs;
+
+ spin_lock(&hwq->cq_lock);
+ completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ spin_unlock(&hwq->cq_lock);
+
+ return completed_reqs;
+}
+
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ u16 qsize;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->id = i;
+ qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;
+
+ /* Submission Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQLBA, i));
+ /* Submission Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQUBA, i));
+ /* Submission Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
+ MCQ_CFG_n(REG_SQDAO, i));
+ /* Submission Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
+ MCQ_CFG_n(REG_SQISAO, i));
+
+ /* Completion Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQLBA, i));
+ /* Completion Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQUBA, i));
+ /* Completion Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
+ MCQ_CFG_n(REG_CQDAO, i));
+ /* Completion Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
+ MCQ_CFG_n(REG_CQISAO, i));
+
+ /* Save the base addresses for quicker access */
+ hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
+ hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
+ hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
+ hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;
+
+ /* Reinitializing is needed upon HC reset */
+ hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
+
+ /* Enable Tail Entry Push Status interrupt only for non-poll queues */
+ if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
+ writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);
+
+ /* Completion Queue Enable|Size to Completion Queue Attribute */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
+ MCQ_CFG_n(REG_CQATTR, i));
+
+ /*
+ * Submission Queue Enable|Size|Completion Queue ID to
+ * Submission Queue Attribute
+ */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
+ (i << QUEUE_ID_OFFSET),
+ MCQ_CFG_n(REG_SQATTR, i));
+ }
+}
+
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
+ REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
+{
+ ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
+ ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
+
+int ufshcd_mcq_init(struct ufs_hba *hba)
+{
+ struct Scsi_Host *host = hba->host;
+ struct ufs_hw_queue *hwq;
+ int ret, i;
+
+ ret = ufshcd_mcq_config_nr_queues(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_vops_mcq_config_resource(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_mcq_vops_op_runtime_config(hba);
+ if (ret) {
+ dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ hba->uhq = devm_kzalloc(hba->dev,
+ hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
+ GFP_KERNEL);
+ if (!hba->uhq) {
+ dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->max_entries = hba->nutrs;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ }
+
+ /* The very first HW queue serves device commands */
+ hba->dev_cmd_queue = &hba->uhq[0];
+ /* Give dev_cmd_queue the minimal number of entries */
+ hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
+
+ host->host_tagset = 1;
+ return 0;
+}
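
Taken together, the new file exposes a small MCQ bring-up API. The sketch below shows one plausible ordering of these calls from a host driver's init path; the wrapper function and its error handling are illustrative assumptions, and only the ufshcd_mcq_* calls come from this patch.

/* Hypothetical bring-up sequence for an MCQ-capable controller (sketch). */
static int example_mcq_bringup(struct ufs_hba *hba)
{
	int qd, ret;

	ret = ufshcd_mcq_init(hba);	/* count queues, allocate hba->uhq */
	if (ret)
		return ret;

	qd = ufshcd_mcq_decide_queue_depth(hba);	/* min(host MAC, bQueueDepth) */
	if (qd < 0)
		return qd;

	ret = ufshcd_mcq_memory_alloc(hba);	/* per-queue SQE/CQE rings */
	if (ret)
		return ret;

	ufshcd_mcq_make_queues_operational(hba);	/* program SQ/CQ attributes */
	ufshcd_mcq_config_mac(hba, qd);	/* cap commands outstanding at the device */
	return 0;
}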
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index b99e3f3dc4ef..0d38e7fa34cc 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -6,6 +6,7 @@
*/
#include <linux/bsg-lib.h>
+#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
@@ -16,31 +17,11 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
struct utp_upiu_query *qr)
{
int desc_size = be16_to_cpu(qr->length);
- int desc_id = qr->idn;
if (desc_size <= 0)
return -EINVAL;
- ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
- if (!*desc_len)
- return -EINVAL;
-
- *desc_len = min_t(int, *desc_len, desc_size);
-
- return 0;
-}
-
-static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
- unsigned int request_len,
- unsigned int reply_len)
-{
- int min_req_len = sizeof(struct ufs_bsg_request);
- int min_rsp_len = sizeof(struct ufs_bsg_reply);
-
- if (min_req_len > request_len || min_rsp_len > reply_len) {
- dev_err(hba->dev, "not enough space assigned\n");
- return -EINVAL;
- }
+ *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
return 0;
}
@@ -83,23 +64,84 @@ out:
return 0;
}
+static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
+{
+ struct ufs_rpmb_request *rpmb_request = job->request;
+ struct ufs_rpmb_reply *rpmb_reply = job->reply;
+ struct bsg_buffer *payload = NULL;
+ enum dma_data_direction dir;
+ struct scatterlist *sg_list = NULL;
+ int rpmb_req_type;
+ int sg_cnt = 0;
+ int ret;
+ int data_len;
+
+ if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
+ !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
+ return -EINVAL;
+
+ if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
+ return -EINVAL;
+
+ rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);
+
+ switch (rpmb_req_type) {
+ case UFS_RPMB_WRITE_KEY:
+ case UFS_RPMB_READ_CNT:
+ case UFS_RPMB_PURGE_ENABLE:
+ dir = DMA_NONE;
+ break;
+ case UFS_RPMB_WRITE:
+ case UFS_RPMB_SEC_CONF_WRITE:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UFS_RPMB_READ:
+ case UFS_RPMB_SEC_CONF_READ:
+ case UFS_RPMB_PURGE_STATUS_READ:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dir != DMA_NONE) {
+ payload = &job->request_payload;
+ if (!payload || !payload->payload_len || !payload->sg_cnt)
+ return -EINVAL;
+
+ sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+ if (unlikely(!sg_cnt))
+ return -ENOMEM;
+ sg_list = payload->sg_list;
+ data_len = payload->payload_len;
+ }
+
+ ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
+ &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
+ &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);
+
+ if (dir != DMA_NONE) {
+ dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+
+ if (!ret)
+ rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
+ }
+
+ return ret;
+}
+
static int ufs_bsg_request(struct bsg_job *job)
{
struct ufs_bsg_request *bsg_request = job->request;
struct ufs_bsg_reply *bsg_reply = job->reply;
struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
- unsigned int req_len = job->request_len;
- unsigned int reply_len = job->reply_len;
struct uic_command uc = {};
int msgcode;
- uint8_t *desc_buff = NULL;
+ uint8_t *buff = NULL;
int desc_len = 0;
enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
int ret;
-
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
- if (ret)
- goto out;
+ bool rpmb = false;
bsg_reply->reply_payload_rcv_len = 0;
@@ -109,34 +151,39 @@ static int ufs_bsg_request(struct bsg_job *job)
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
- &desc_len, desc_op);
- if (ret) {
- ufshcd_rpm_put_sync(hba);
+ ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
+ if (ret)
goto out;
- }
-
fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
&bsg_reply->upiu_rsp, msgcode,
- desc_buff, &desc_len, desc_op);
+ buff, &desc_len, desc_op);
if (ret)
- dev_err(hba->dev,
- "exe raw upiu: error code %d\n", ret);
-
+ dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
+ else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ buff, desc_len);
+ }
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
ret = ufshcd_send_uic_cmd(hba, &uc);
if (ret)
- dev_err(hba->dev,
- "send uic cmd: error code %d\n", ret);
+ dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
break;
+ case UPIU_TRANSACTION_ARPMB_CMD:
+ rpmb = true;
+ ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
+ if (ret)
+ dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
+ break;
default:
ret = -ENOTSUPP;
dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
@@ -144,22 +191,11 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- ufshcd_rpm_put_sync(hba);
-
- if (!desc_buff)
- goto out;
-
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
- bsg_reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- desc_buff, desc_len);
-
- kfree(desc_buff);
-
out:
+ ufshcd_rpm_put_sync(hba);
+ kfree(buff);
bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply);
+ job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
if (ret == 0)
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
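
From userspace, the new UPIU_TRANSACTION_ARPMB_CMD message is reached through the SG_IO ioctl on the UFS bsg node, like the existing query/UIC paths. A rough sketch follows; the struct ufs_rpmb_request/ufs_rpmb_reply layouts are assumptions based on this series' uapi header, which is outside this diff.

/* Hypothetical userspace sketch: submit an Advanced RPMB read over bsg.
 * The structs are assumed to come from <scsi/scsi_bsg_ufs.h> as extended
 * by this series; their exact layout is not shown in this diff. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>			/* SG_IO */
#include <scsi/scsi_bsg_ufs.h>

static int send_arpmb_read(int bsg_fd, struct ufs_rpmb_request *req,
			   struct ufs_rpmb_reply *rsp, void *buf, uint32_t len)
{
	struct sg_io_v4 io = {
		.guard = 'Q',
		.protocol = BSG_PROTOCOL_SCSI,
		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
		.request = (uint64_t)(uintptr_t)req,	/* ehs_req.length must be 2 */
		.request_len = sizeof(*req),
		.response = (uint64_t)(uintptr_t)rsp,
		.max_response_len = sizeof(*rsp),
		.din_xferp = (uint64_t)(uintptr_t)buf,	/* DMA_FROM_DEVICE case */
		.din_xfer_len = len,
	};

	return ioctl(bsg_fd, SG_IO, &io);
}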
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index a9e8e1f5afe7..529f8507a5e4 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -61,7 +61,24 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe);
+int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+
+#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -70,9 +87,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
@@ -226,6 +240,53 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
+static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->reinit_notify)
+ hba->vops->reinit_notify(hba);
+}
+
+static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->mcq_config_resource)
+ return hba->vops->mcq_config_resource(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->get_hba_mac)
+ return hba->vops->get_hba_mac(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->op_runtime_config)
+ return hba->vops->op_runtime_config(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ if (hba->vops && hba->vops->get_outstanding_cqs)
+ return hba->vops->get_outstanding_cqs(hba, ocqs);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->config_esi)
+ return hba->vops->config_esi(hba);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
@@ -302,4 +363,44 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
+static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
+{
+ u32 mask = q->max_entries - 1;
+ u32 val;
+
+ q->sq_tail_slot = (q->sq_tail_slot + 1) & mask;
+ val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
+ writel(val, q->mcq_sq_tail);
+}
+
+static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_cq_tail);
+
+ q->cq_tail_slot = val / sizeof(struct cq_entry);
+}
+
+static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
+{
+ return q->cq_head_slot == q->cq_tail_slot;
+}
+
+static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
+{
+ q->cq_head_slot++;
+ if (q->cq_head_slot == q->max_entries)
+ q->cq_head_slot = 0;
+}
+
+static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
+{
+ writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
+}
+
+static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
+{
+ struct cq_entry *cqe = q->cqe_base_addr;
+
+ return cqe + q->cq_head_slot;
+}
#endif /* _UFSHCD_PRIV_H_ */
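
The inline helpers above pin down the ring conventions: the SQ tail wraps with a power-of-two mask (so max_entries must be a power of two for I/O queues), both doorbells are written as byte offsets (slot times the 32-byte UTRD/CQE size), and the CQ head wraps by explicit comparison. A consumer loop built from them would look like the sketch below, which mirrors ufshcd_mcq_poll_cqe_nolock() in ufs-mcq.c and adds nothing new.

/* Sketch of a CQ consumer using the helpers above; assumes max_entries
 * is a power of two, as the SQ tail mask in ufshcd_inc_sq_tail() requires. */
static unsigned long example_drain_cq(struct ufs_hw_queue *q)
{
	unsigned long done = 0;

	ufshcd_mcq_update_cq_tail_slot(q);	/* snapshot HW tail: byte offset / CQE size */
	while (!ufshcd_mcq_is_cq_empty(q)) {
		struct cq_entry *cqe = ufshcd_mcq_cur_cqe(q);

		(void)cqe;	/* a real consumer completes the tagged request here */
		ufshcd_mcq_inc_cq_head_slot(q);
		done++;
	}
	if (done)
		ufshcd_mcq_update_cq_head(q);	/* ring the head doorbell once */
	return done;
}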
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index e18c9f4463ec..276a82b2e5ee 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -43,6 +43,12 @@
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
+
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
@@ -56,6 +62,9 @@
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/* Advanced RPMB request timeout */
+#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
+
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -89,6 +98,33 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+/* UFSHCI 4.0 compliant HCs support this mode; refer to param_set_mcq_mode() */
+static bool use_mcq_mode = true;
+
+static bool is_mcq_supported(struct ufs_hba *hba)
+{
+ return hba->mcq_sup && use_mcq_mode;
+}
+
+static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct kernel_param_ops mcq_mode_ops = {
+ .set = param_set_mcq_mode,
+ .get = param_get_bool,
+};
+
+module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -528,7 +564,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- prdt_length /= sizeof(struct ufshcd_sg_entry);
+ prdt_length /= ufshcd_sg_entry_size(hba);
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
@@ -537,7 +573,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
+ ufshcd_sg_entry_size(hba) * prdt_length);
}
}
@@ -740,12 +776,17 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
* @lrbp: pointer to local command reference block
+ * @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
-static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
+ if (cqe)
+ return le32_to_cpu(cqe->status) & MASK_OCS;
+
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
@@ -1121,6 +1162,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
return pending;
}
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
@@ -1154,7 +1201,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- schedule();
+ io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
timeout = true;
@@ -1225,21 +1272,28 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
return ret;
}
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
- #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
int ret = 0;
/*
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
+ mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
ufshcd_scsi_unblock_requests(hba);
goto out;
}
@@ -1251,12 +1305,16 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
- if (writelock)
- up_write(&hba->clk_scaling_lock);
- else
- up_read(&hba->clk_scaling_lock);
+ up_write(&hba->clk_scaling_lock);
+
+ /* Enable Write Booster if we have scaled up else disable it */
+ if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
+ ufshcd_wb_toggle(hba, scale_up);
+
+ mutex_unlock(&hba->wb_mutex);
+
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
}
@@ -1273,9 +1331,8 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- bool is_writelock = true;
- ret = ufshcd_clock_scaling_prepare(hba);
+ ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
@@ -1302,15 +1359,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
}
- /* Enable Write Booster if we have scaled up else disable it */
- if (ufshcd_enable_wb_if_scaling_up(hba)) {
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
- }
-
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, is_writelock);
+ ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
return ret;
}
@@ -2138,9 +2188,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
+ * @hwq: pointer to hardware queue instance
*/
static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
+ struct ufs_hw_queue *hwq)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
@@ -2154,12 +2206,24 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ if (is_mcq_enabled(hba)) {
+ int utrd_size = sizeof(struct utp_transfer_req_desc);
+
+ spin_lock(&hwq->sq_lock);
+ memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
+ lrbp->utr_descriptor_ptr, utrd_size);
+ ufshcd_inc_sq_tail(hwq);
+ spin_unlock(&hwq->sq_lock);
+ } else {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, lrbp->task_tag,
+ !!lrbp->cmd);
+ __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << lrbp->task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ }
}
/**
@@ -2247,6 +2311,14 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "crypto setup failed\n");
+ hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
+ if (!hba->mcq_sup)
+ return err;
+
+ hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
+ hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
+ hba->mcq_capabilities);
+
return err;
}
@@ -2399,38 +2471,30 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_map_sg - Map scatter-gather list to prdt
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- *
- * Returns 0 in case of success, non-zero value in case of failure
+ * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
+ * @hba: per-adapter instance
+ * @lrbp: pointer to local reference block
+ * @sg_entries: The number of sg entries actually used
+ * @sg_list: Pointer to SG list
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
+ struct scatterlist *sg_list)
{
- struct ufshcd_sg_entry *prd_table;
+ struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
- struct scsi_cmnd *cmd;
- int sg_segments;
int i;
- cmd = lrbp->cmd;
- sg_segments = scsi_dma_map(cmd);
- if (sg_segments < 0)
- return sg_segments;
-
- if (sg_segments) {
+ if (sg_entries) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((sg_segments *
- sizeof(struct ufshcd_sg_entry)));
+ cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16(sg_segments);
+ lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
- prd_table = lrbp->ucd_prdt_ptr;
+ prd = lrbp->ucd_prdt_ptr;
- scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ for_each_sg(sg_list, sg, sg_entries, i) {
const unsigned int len = sg_dma_len(sg);
/*
@@ -2442,13 +2506,32 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
- prd_table[i].size = cpu_to_le32(len - 1);
- prd_table[i].addr = cpu_to_le64(sg->dma_address);
- prd_table[i].reserved = 0;
+ prd->size = cpu_to_le32(len - 1);
+ prd->addr = cpu_to_le64(sg->dma_address);
+ prd->reserved = 0;
+ prd = (void *)prd + ufshcd_sg_entry_size(hba);
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ int sg_segments = scsi_dma_map(cmd);
+
+ if (sg_segments < 0)
+ return sg_segments;
+
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
return 0;
}
@@ -2496,14 +2579,15 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
}
/**
- * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * ufshcd_prepare_req_desc_hdr - Fill in the UTP transfer request
* descriptor according to request
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
+ * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u8 *upiu_flags, enum dma_data_direction cmd_dir)
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
+ enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
@@ -2522,8 +2606,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
- dword_0 = data_direction | (lrbp->command_type
- << UPIU_COMMAND_TYPE_OFFSET);
+ dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
+ ehs_length << 8;
if (lrbp->intr_cmd)
dword_0 |= UTP_REQ_DESC_INT_CMD;
@@ -2578,8 +2662,7 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
}
/**
- * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
- * for query requsts
+ * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
* @lrbp: local reference block pointer
* @upiu_flags: flags
@@ -2650,7 +2733,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2678,8 +2761,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
@@ -2711,33 +2793,38 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
*/
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i;
+ struct ufs_hba *hba = shost_priv(shost);
+ int i, queue_offset = 0;
+
+ if (!is_mcq_supported(hba)) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
+ hba->nr_queues[HCTX_TYPE_READ] = 0;
+ hba->nr_queues[HCTX_TYPE_POLL] = 1;
+ hba->nr_hw_queues = 1;
+ }
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
- switch (i) {
- case HCTX_TYPE_DEFAULT:
- case HCTX_TYPE_POLL:
- map->nr_queues = 1;
- break;
- case HCTX_TYPE_READ:
- map->nr_queues = 0;
+ map->nr_queues = hba->nr_queues[i];
+ if (!map->nr_queues)
continue;
- default:
- WARN_ON_ONCE(true);
- }
- map->queue_offset = 0;
+ map->queue_offset = queue_offset;
+ if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
+ map->queue_offset = 0;
+
blk_mq_map_queues(map);
+ queue_offset += map->nr_queues;
}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
- struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+ struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+ i * sizeof_utp_transfer_cmd_desc(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * sizeof(struct utp_transfer_cmd_desc);
+ i * sizeof_utp_transfer_cmd_desc(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2745,11 +2832,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
@@ -2766,6 +2853,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
int err = 0;
+ struct ufs_hw_queue *hwq = NULL;
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
@@ -2850,7 +2938,10 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
- ufshcd_send_command(hba, tag);
+ if (is_mcq_enabled(hba))
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+ ufshcd_send_command(hba, tag, hwq);
out:
rcu_read_unlock();
@@ -2945,6 +3036,12 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
__func__);
break;
+ case UPIU_TRANSACTION_RESPONSE:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
+ }
+ break;
default:
err = -EINVAL;
dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
@@ -2974,7 +3071,7 @@ retry:
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
- err = ufshcd_get_tr_ocs(lrbp);
+ err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
@@ -3010,6 +3107,22 @@ retry:
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ pending = test_bit(lrbp->task_tag,
+ &hba->outstanding_reqs);
+ if (pending)
+ hba->dev_cmd.complete = NULL;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (!pending) {
+ /*
+ * The completion handler ran while we tried to
+ * clear the command.
+ */
+ time_left = 1;
+ goto retry;
+ }
}
}
@@ -3045,10 +3158,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out;
hba->dev_cmd.complete = &wait;
+ hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
@@ -3369,37 +3483,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
- * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_len: mapped desc length (out)
- */
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_len)
-{
- if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
- desc_id == QUERY_DESC_IDN_RFU_1)
- *desc_len = 0;
- else
- *desc_len = hba->desc_size[desc_id];
-}
-EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
-
-static void ufshcd_update_desc_length(struct ufs_hba *hba,
- enum desc_idn desc_id, int desc_index,
- unsigned char desc_len)
-{
- if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
- desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
- /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
- * than the RPMB unit, however, both descriptors share the same
- * desc_idn, to cover both unit descriptors with one length, we
- * choose the normal unit descriptor length by desc_index.
- */
- hba->desc_size[desc_id] = desc_len;
-}
-
-/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -3419,26 +3502,13 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
{
int ret;
u8 *desc_buf;
- int buff_len;
+ int buff_len = QUERY_DESC_MAX_SIZE;
bool is_kmalloc = true;
/* Safety check */
if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- /* Get the length of descriptor */
- ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!buff_len) {
- dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
- return -EINVAL;
- }
-
- if (param_offset >= buff_len) {
- dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
- __func__, param_offset, desc_id, buff_len);
- return -EINVAL;
- }
-
/* Check whether we need temp memory */
if (param_offset != 0 || param_size < buff_len) {
desc_buf = kzalloc(buff_len, GFP_KERNEL);
@@ -3451,15 +3521,24 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
/* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0,
- desc_buf, &buff_len);
-
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
__func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Update descriptor length */
+ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+
+ if (param_offset >= buff_len) {
+ dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
+ __func__, param_offset, desc_id, buff_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
/* Sanity check */
if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
@@ -3468,10 +3547,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /* Update descriptor length */
- buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
- ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
-
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
if (param_offset >= buff_len)
@@ -3658,7 +3733,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
@@ -3666,12 +3741,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/*
* UFSHCI requires UTP command descriptor to be 128 byte aligned.
- * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
- * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
- * be aligned to 128 bytes as well
*/
if (!hba->ucdl_base_addr ||
- WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
@@ -3687,13 +3759,21 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
/*
+ * Skip utmrdl allocation; it may have been
+ * allocated during the first pass and not released during
+ * MCQ memory allocation.
+ * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
+ */
+ if (hba->utmrdl_base_addr)
+ goto skip_utmrdl;
+ /*
* Allocate memory for UTP Task Management descriptors
* UFSHCI requires 1024 byte alignment of UTMRD
*/
@@ -3703,12 +3783,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
+skip_utmrdl:
/* Allocate memory for local reference block */
hba->lrb = devm_kcalloc(hba->dev,
hba->nutrs, sizeof(struct ufshcd_lrb),
@@ -3752,7 +3833,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
@@ -4909,7 +4990,7 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
*/
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
- int len = hba->desc_size[QUERY_DESC_IDN_UNIT];
+ int len = QUERY_DESC_MAX_SIZE;
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
u8 lun_qdepth = hba->nutrs;
u8 *desc_buf;
@@ -4944,6 +5025,12 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
+ /* In case of RPMB LU, check if advanced RPMB mode is enabled */
+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
+ desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+
kfree(desc_buf);
set_qdepth:
/*
@@ -5032,8 +5119,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
ufshcd_hpb_configure(hba, sdev);
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
- blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+ if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
+ blk_queue_update_dma_alignment(q, 4096 - 1);
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5132,18 +5219,20 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
* @lrbp: pointer to local reference block of completed command
+ * @cqe: pointer to the completion queue entry
*
* Returns result of the command to notify SCSI midlayer
*/
static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
int result = 0;
int scsi_status;
enum utp_ocs ocs;
/* overall command status of utrd */
- ocs = ufshcd_get_tr_ocs(lrbp);
+ ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
@@ -5308,42 +5397,53 @@ static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
}
/**
- * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_compl_one_cqe - handle a completion queue entry
* @hba: per adapter instance
- * @completed_reqs: bitmask that indicates which requests to complete
+ * @task_tag: the task tag of the request to be completed
+ * @cqe: pointer to the completion queue entry
*/
-static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int index;
-
- for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- lrbp = &hba->lrb[index];
- lrbp->compl_time_stamp = ktime_get();
- lrbp->compl_time_stamp_local_clock = local_clock();
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- ufshcd_add_command_trace(hba, index,
- UFS_DEV_COMP);
- complete(hba->dev_cmd.complete);
- ufshcd_clk_scaling_update_busy(hba);
- }
+
+ lrbp = &hba->lrb[task_tag];
+ lrbp->compl_time_stamp = ktime_get();
+ cmd = lrbp->cmd;
+ if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ /* Do not touch lrbp after scsi done */
+ scsi_done(cmd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ hba->dev_cmd.cqe = cqe;
+ ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
+ complete(hba->dev_cmd.complete);
+ ufshcd_clk_scaling_update_busy(hba);
}
}
}
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: bitmask that indicates which requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ int tag;
+
+ for_each_set_bit(tag, &completed_reqs, hba->nutrs)
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+}
+
/* Any value that is not an existing queue number is fine for this constant. */
enum {
UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
@@ -5373,6 +5473,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
+ struct ufs_hw_queue *hwq;
+
+ if (is_mcq_enabled(hba)) {
+ hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+ return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -6056,11 +6163,21 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
+static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+{
+ spin_lock_irq(hba->host->host_lock);
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
+ spin_unlock_irq(hba->host->host_lock);
+}
+
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
+ mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
hba->clk_scaling.is_allowed = allow;
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
@@ -6596,6 +6713,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
}
/**
+ * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
+ * @hba: per adapter instance
+ *
+ * Returns IRQ_HANDLED if interrupt is handled
+ */
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ unsigned long outstanding_cqs;
+ unsigned int nr_queues;
+ int i, ret;
+ u32 events;
+
+ ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
+ if (ret)
+ outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+
+ /* Exclude the poll queues */
+ nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ for_each_set_bit(i, &outstanding_cqs, nr_queues) {
+ hwq = &hba->uhq[i];
+
+ events = ufshcd_mcq_read_cqis(hba, i);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, i);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
@@ -6620,6 +6771,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
+ if (intr_status & MCQ_CQ_EVENT_STATUS)
+ retval |= ufshcd_handle_mcq_cq_events(hba);
+
return retval;
}
@@ -6868,7 +7022,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
/* update the task tag in the request upiu */
req_upiu->header.dword_0 |= cpu_to_be32(tag);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
@@ -6887,7 +7041,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6992,6 +7146,100 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
}
/**
+ * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
+ * @hba: per adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @req_ehs: EHS field which contains Advanced RPMB Request Message
+ * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
+ * @sg_cnt: The number of sg entries actually used
+ * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
+ * @dir: DMA direction
+ *
+ * Returns zero on success, non-zero on failure
+ */
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
+ struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ const u32 tag = hba->reserved_slot;
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int result;
+ u8 upiu_flags;
+ u8 *ehs_data;
+ u16 ehs_len;
+
+ /* Protects use of hba->reserved_slot. */
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = UFS_UPIU_RPMB_WLUN;
+
+ lrbp->intr_cmd = true;
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
+ hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
+
+ /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+
+ /* update the task tag and LUN in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+
+ /* copy the UPIU (which contains the CDB) request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ /* Copy EHS, starting with byte32, immediately after the CDB package */
+ memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
+
+ if (dir != DMA_NONE && sg_list)
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+
+ if (!err) {
+ /* Just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+ /* Get the response UPIU result */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
+ /*
+ * Since bLength in the EHS indicates the total size of the EHS header
+ * and EHS data in 32-byte units, bLength is 02h for an Advanced RPMB
+ * request/response message.
+ */
+ if (ehs_len == 2 && rsp_ehs) {
+ /*
+ * ucd_rsp_ptr points to a buffer of ALIGNED_UPIU_SIZE (512) bytes,
+ * and the EHS data starts at byte 32 of that buffer.
+ */
+ ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
+ memcpy(rsp_ehs, ehs_data, ehs_len * 32);
+ }
+ }
+
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err ? : result;
+}
+
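The ehs_len decode above pulls bLength out of the top byte of the response header's dword_2; since bLength counts 32-byte units, a value of 02h means 64 bytes of EHS data are copied back. A self-contained sketch of that arithmetic, using an assumed header word already converted to CPU byte order (the driver does this via be32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed example: response header dword_2, already in CPU order */
	uint32_t dword_2 = 0x02000000;
	unsigned int ehs_len = dword_2 >> 24;	/* bLength, in 32-byte units */

	printf("bLength = %u -> copy %u bytes of EHS\n", ehs_len, ehs_len * 32);
	/* prints: bLength = 2 -> copy 64 bytes of EHS */
	return 0;
}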
+/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
@@ -7433,12 +7681,11 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
* In case regulators are not initialized we'll return 0
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
- * @len: length of desc_buff
*
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- const u8 *desc_buf, int len)
+ const u8 *desc_buf)
{
u32 icc_level = 0;
@@ -7480,25 +7727,23 @@ out:
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
- int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
u8 *desc_buf;
u32 icc_level;
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf)
return;
ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (ret) {
dev_err(hba->dev,
- "%s: Failed reading power descriptor.len = %d ret = %d",
- __func__, buff_len, ret);
+ "%s: Failed reading power descriptor ret = %d",
+ __func__, ret);
goto out;
}
- icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
- buff_len);
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -7608,10 +7853,6 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
(hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
goto wb_disabled;
- if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
- goto wb_disabled;
-
ext_ufs_feature = get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
@@ -7682,6 +7923,31 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 ext_ufs_feature;
+ u32 ext_iid_en = 0;
+ int err;
+
+ /* Only UFS-4.0 and above may support EXT_IID */
+ if (dev_info->wspecversion < 0x400)
+ goto out;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
+ goto out;
+
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
+ if (err)
+ dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
+
+out:
+ dev_info->b_ext_iid_en = ext_iid_en;
+}
+
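The probe above reads a big-endian 32-bit feature word out of the raw descriptor bytes and tests a single capability bit. A userspace model of that decode follows; the bit position is invented for the example and is not the real UFS_DEV_EXT_IID_SUP value.

#include <stdint.h>
#include <stdio.h>

#define EXT_IID_SUP_BIT (1u << 16)	/* invented bit position for the example */

/* open-coded equivalent of get_unaligned_be32() */
static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* pretend descriptor bytes at the extended-feature-support offset */
	uint8_t desc[4] = { 0x00, 0x01, 0x00, 0x00 };
	uint32_t feat = get_be32(desc);

	printf("EXT_IID %ssupported\n", (feat & EXT_IID_SUP_BIT) ? "" : "not ");
	return 0;
}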
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
@@ -7719,14 +7985,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
- desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
- hba->desc_size[QUERY_DESC_IDN_DEVICE]);
+ QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -7743,6 +8009,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -7780,6 +8047,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (hba->ext_iid_sup)
+ ufshcd_ext_iid_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -7973,18 +8243,16 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
int err;
- size_t buff_len;
u8 *desc_buf;
- buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
__func__, err);
@@ -7996,7 +8264,7 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
- if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
ufshpb_get_geo_info(hba, desc_buf);
@@ -8081,11 +8349,7 @@ out:
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
bool flag;
- int ret, i;
-
- /* Init device descriptor sizes */
- for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
- hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
+ int ret;
/* Init UFS geometry descriptor related parameters */
ret = ufshcd_device_geo_params_init(hba);
@@ -8153,27 +8417,96 @@ out:
return ret;
}
-/**
- * ufshcd_probe_hba - probe hba to detect device and initialize it
- * @hba: per-adapter instance
- * @init_dev_params: whether or not to call ufshcd_device_params_init().
- *
- * Execute link-startup and verify device initialization
- */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+/* SDB - Single Doorbell */
+static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
+{
+ size_t ucdl_size, utrdl_size;
+
+ ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+ dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
+ hba->ucdl_dma_addr);
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
+ dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
+ hba->utrdl_dma_addr);
+
+ devm_kfree(hba->dev, hba->lrb);
+}
+
+static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
int ret;
- unsigned long flags;
- ktime_t start = ktime_get();
+ int old_nutrs = hba->nutrs;
+
+ ret = ufshcd_mcq_decide_queue_depth(hba);
+ if (ret < 0)
+ return ret;
+
+ hba->nutrs = ret;
+ ret = ufshcd_mcq_init(hba);
+ if (ret)
+ goto err;
+
+ /*
+ * The number of supported tags in MCQ mode may be larger than in SDB
+ * mode, so the memory previously allocated for nutrs may not be enough.
+ */
+ if (hba->nutrs != old_nutrs) {
+ ufshcd_release_sdb_queue(hba, old_nutrs);
+ ret = ufshcd_memory_alloc(hba);
+ if (ret)
+ goto err;
+ ufshcd_host_memory_configure(hba);
+ }
+
+ ret = ufshcd_mcq_memory_alloc(hba);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ hba->nutrs = old_nutrs;
+ return ret;
+}
+
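The allocation dance above is a grow-or-roll-back pattern: tentatively adopt the larger MCQ queue depth, reallocate everything that is sized by nutrs, and restore old_nutrs if any step fails. A minimal userspace model of the same pattern, with an assumed 64-byte per-tag footprint:

#include <stdio.h>
#include <stdlib.h>

struct host { int nutrs; void *bufs; };

static int realloc_for_depth(struct host *h, int depth)
{
	void *p = realloc(h->bufs, (size_t)depth * 64); /* 64: assumed per-tag size */

	if (!p)
		return -1;
	h->bufs = p;
	return 0;
}

int main(void)
{
	struct host h = { .nutrs = 32, .bufs = NULL };
	int old_nutrs = h.nutrs;
	int new_depth = 64;	/* e.g. a negotiated MCQ depth */

	h.nutrs = new_depth;
	if (h.nutrs != old_nutrs && realloc_for_depth(&h, h.nutrs)) {
		h.nutrs = old_nutrs;	/* roll back on failure, like the err: path */
		fprintf(stderr, "fell back to depth %d\n", h.nutrs);
	}
	printf("queue depth = %d\n", h.nutrs);
	free(h.bufs);
	return 0;
}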
+static void ufshcd_config_mcq(struct ufs_hba *hba)
+{
+ int ret;
+
+ ret = ufshcd_mcq_vops_config_esi(hba);
+ dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
+
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+
+ hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
+ /* Select MCQ mode */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
+ REG_UFS_MEM_CFG);
+ hba->mcq_enabled = true;
+
+ dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
+ hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
+ hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
+ hba->nutrs);
+}
+
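Selecting MCQ mode at the end of ufshcd_config_mcq() is a plain read-modify-write of REG_UFS_MEM_CFG so that only bit 0 changes and the other configuration bits are preserved. A tiny model of that idiom over a fake register file (offsets and names invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* stands in for MMIO space */

static uint32_t rd(int off)             { return regs[off]; }
static void     wr(int off, uint32_t v) { regs[off] = v; }

int main(void)
{
	enum { REG_MEM_CFG = 3 };

	regs[REG_MEM_CFG] = 0xf0;	/* pretend other bits are already set */
	/* read-modify-write: set bit 0 without disturbing the rest */
	wr(REG_MEM_CFG, rd(REG_MEM_CFG) | 0x1);
	printf("MEM_CFG = 0x%x\n", rd(REG_MEM_CFG));	/* prints 0xf1 */
	return 0;
}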
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
+{
+ int ret;
+ struct Scsi_Host *host = hba->host;
hba->ufshcd_state = UFSHCD_STATE_RESET;
ret = ufshcd_link_startup(hba);
if (ret)
- goto out;
+ return ret;
if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
- goto out;
+ return ret;
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
@@ -8181,15 +8514,19 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Reconfigure MCQ upon reset */
+ if (is_mcq_enabled(hba) && !init_dev_params)
+ ufshcd_config_mcq(hba);
+
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/* Initiate UFS initialization, and waiting until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/*
* Initialize UFS device parameters used by driver, these
@@ -8198,7 +8535,25 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
- goto out;
+ return ret;
+ if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+ ret = ufshcd_alloc_mcq(hba);
+ if (ret) {
+ /* Continue with SDB mode */
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ ret);
+ }
+ ret = scsi_add_host(host, hba->dev);
+ if (ret) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ hba->scsi_host_added = true;
+ }
+ /* MCQ may be disabled if ufshcd_alloc_mcq() fails */
+ if (is_mcq_supported(hba) && use_mcq_mode)
+ ufshcd_config_mcq(hba);
}
ufshcd_tune_unipro_params(hba);
@@ -8219,11 +8574,51 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
+ * @hba: per-adapter instance
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+{
+ ktime_t start = ktime_get();
+ unsigned long flags;
+ int ret;
+
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
+
+ if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
+ ufshcd_hba_stop(hba);
+ ufshcd_vops_reinit_notify(hba);
+ ret = ufshcd_hba_enable(hba);
+ if (ret) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_evt_hist(hba);
+ ufshcd_print_host_state(hba);
goto out;
}
- ufshcd_print_pwr_info(hba);
+
+ /* Reinit the device */
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
}
+ ufshcd_print_pwr_info(hba);
+
/*
* bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
* and for removable UFS card as well, hence always set the parameter.
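For adapters with UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH, probe now runs the whole device-init sequence twice, with a full device/controller reset in between. A control-flow model of that path (plain C, not driver code):

#include <stdio.h>

static int device_init(int pass)
{
	printf("device_init pass %d\n", pass);
	return 0;	/* pretend init succeeds */
}

int main(void)
{
	/* stands in for UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH */
	int quirk_reinit = 1;
	int ret = device_init(1);

	if (!ret && quirk_reinit) {
		printf("reset device and controller, re-enable host\n");
		ret = device_init(2);	/* second pass after the reset */
	}
	return ret;
}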
@@ -8351,7 +8746,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
- .dma_boundary = PAGE_SIZE - 1,
.rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
@@ -9083,6 +9477,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret && pm_op != UFS_SHUTDOWN_PM) {
+ /*
+ * If an error is returned in the suspend flow, I/O will hang.
+ * Trigger the error handler and abort suspend so that error
+ * recovery can run.
+ */
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
+ }
if (ret)
goto enable_scaling;
}
@@ -9094,6 +9497,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
+ if (ret && pm_op != UFS_SHUTDOWN_PM) {
+ /*
+ * If an error is returned in the suspend flow, I/O will hang.
+ * Trigger the error handler and abort suspend so that error
+ * recovery can run.
+ */
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
+ }
if (ret)
goto set_dev_active;
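Both hunks above apply the same policy: if a power-mode or link-state transition fails during suspend (but not during shutdown), schedule error recovery and return -EBUSY so the PM core fails the suspend cleanly instead of leaving I/O hung. A minimal model of that policy:

#include <errno.h>
#include <stdio.h>

static int set_dev_pwr_mode(void)       { return -EIO; } /* pretend it fails */
static void force_error_recovery(void)  { printf("error handler scheduled\n"); }

int main(void)
{
	int shutdown = 0;	/* models pm_op != UFS_SHUTDOWN_PM */
	int ret = set_dev_pwr_mode();

	if (ret && !shutdown) {
		force_error_recovery();
		ret = -EBUSY;	/* report busy rather than a hard error */
	}
	printf("suspend returns %d\n", ret);
	return 0;
}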
@@ -9427,6 +9839,7 @@ static int ufshcd_resume(struct ufs_hba *hba)
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
+
goto out;
disable_vreg:
@@ -9590,6 +10003,56 @@ void ufshcd_remove(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
+#ifdef CONFIG_PM_SLEEP
+int ufshcd_system_freeze(struct device *dev)
+{
+ return ufshcd_system_suspend(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
+
+int ufshcd_system_restore(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_resume(dev);
+ if (ret)
+ return ret;
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+ /*
+ * Make sure that the UTRL and UTMRL base address registers are
+ * updated with the latest queue addresses. Only after these
+ * addresses are updated can new commands be queued.
+ */
+ mb();
+
+ /* Resuming from hibernate, assume that link was OFF */
+ ufshcd_set_link_off(hba);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_restore);
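The restore path reprograms each 64-bit queue base address as a pair of 32-bit register writes. The arithmetic behind the lower_32_bits()/upper_32_bits() helpers, with an invented example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x00000001a2b3c000ull;	/* example queue base address */
	uint32_t lo = (uint32_t)dma;		/* lower_32_bits() equivalent */
	uint32_t hi = (uint32_t)(dma >> 32);	/* upper_32_bits() equivalent */

	printf("BASE_L = 0x%08x, BASE_H = 0x%08x\n", lo, hi);
	/* prints: BASE_L = 0xa2b3c000, BASE_H = 0x00000001 */
	return 0;
}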
+
+int ufshcd_system_thaw(struct device *dev)
+{
+ return ufshcd_system_resume(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+#endif /* CONFIG_PM_SLEEP */
+
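Exported this way, the three callbacks are meant to be wired into a host platform driver's dev_pm_ops. A hedged sketch of what that wiring might look like; the ops struct name is hypothetical, and the explicit .freeze/.thaw/.restore entries deliberately override the defaults that SET_SYSTEM_SLEEP_PM_OPS() would install:

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	/* hibernation wants a restore that assumes the link went down */
	.freeze		= ufshcd_system_freeze,
	.restore	= ufshcd_system_restore,
	.thaw		= ufshcd_system_thaw,
#endif
};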
/**
* ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
* @hba: pointer to Host Bus Adapter (HBA)
@@ -9648,6 +10111,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba->nop_out_timeout = NOP_OUT_TIMEOUT;
+ ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
@@ -9767,6 +10231,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for exception event control */
mutex_init(&hba->ee_ctrl_mutex);
+ mutex_init(&hba->wb_mutex);
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
@@ -9796,10 +10261,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true;
}
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
+ if (!is_mcq_supported(hba)) {
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
@@ -10027,11 +10494,6 @@ static int __init ufshcd_core_init(void)
{
int ret;
- /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
- static_assert(sizeof(struct utp_transfer_cmd_desc) ==
- 2 * ALIGNED_UPIU_SIZE +
- SG_ALL * sizeof(struct ufshcd_sg_entry));
-
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index 994f4ac9df5a..a46a7666c891 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -2382,12 +2382,10 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
{
u16 max_active_rgns;
u8 lu_enable;
- int size;
+ int size = QUERY_DESC_MAX_SIZE;
int ret;
char desc_buf[QUERY_DESC_MAX_SIZE];
- ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
-
ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
QUERY_DESC_IDN_UNIT, lun, 0,