Diffstat (limited to 'include/ufs/ufshcd.h')
-rw-r--r--   include/ufs/ufshcd.h | 108
1 file changed, 88 insertions(+), 20 deletions(-)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index f66a275bf8cc..47cba116f87b 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -16,6 +16,8 @@
#include <linux/blk-crypto-profile.h>
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
+#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/msi.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
@@ -28,6 +30,7 @@
#define UFSHCD "ufshcd"
+struct scsi_device;
struct ufs_hba;
enum dev_cmd_type {
@@ -71,8 +74,8 @@ enum ufs_event_type {
* @done: UIC command completion
*/
struct uic_command {
- u32 command;
- u32 argument1;
+ const u32 command;
+ const u32 argument1;
u32 argument2;
u32 argument3;
int cmd_active;
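With @command and @argument1 now const, callers have to provide both values when the structure is initialized rather than assigning them afterwards. A minimal sketch, assuming a DME_GET of an arbitrary UniPro attribute (the opcode and attribute below are only illustrative):

    struct uic_command uic_cmd = {
        .command   = UIC_CMD_DME_GET,                   /* opcode is fixed at init time */
        .argument1 = UIC_ARG_MIB(PA_AVAILTXDATALANES),  /* attribute selector */
    };
    /* argument2/argument3 remain writable and can still be filled in later. */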
@@ -293,6 +296,7 @@ struct ufs_pwr_mode_info {
/**
* struct ufs_hba_variant_ops - variant specific callbacks
* @name: variant name
+ * @max_num_rtt: maximum RTT supported by the host
* @init: called when the driver is initialized
* @exit: called to cleanup everything done in init
* @set_dma_mask: For setting another DMA mask than indicated by the 64AS
@@ -323,16 +327,20 @@ struct ufs_pwr_mode_info {
* @device_reset: called to issue a reset pulse on the UFS device
* @config_scaling_param: called to configure clock scaling parameters
* @program_key: program or evict an inline encryption key
+ * @fill_crypto_prdt: initialize crypto-related fields in the PRDT
* @event_notify: called to notify important events
- * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources
- * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
+ * @get_hba_mac: reports maximum number of outstanding commands supported by
+ * the controller. Should be implemented for UFSHCI 4.0 or later
+ * controllers that are not compliant with the UFSHCI 4.0 specification.
* @op_runtime_config: called to config Operation and runtime regs Pointers
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt
+ * @config_scsi_dev: called to configure SCSI device parameters
*/
struct ufs_hba_variant_ops {
const char *name;
+ int max_num_rtt;
int (*init)(struct ufs_hba *);
void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
@@ -367,9 +375,11 @@ struct ufs_hba_variant_ops {
struct devfreq_simple_ondemand_data *data);
int (*program_key)(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg, int slot);
+ int (*fill_crypto_prdt)(struct ufs_hba *hba,
+ const struct bio_crypt_ctx *crypt_ctx,
+ void *prdt, unsigned int num_segments);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
- void (*reinit_notify)(struct ufs_hba *);
int (*mcq_config_resource)(struct ufs_hba *hba);
int (*get_hba_mac)(struct ufs_hba *hba);
int (*op_runtime_config)(struct ufs_hba *hba);
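Taken together, the new @max_num_rtt field and the clarified ->get_hba_mac() contract could be wired up by a host driver along these lines (driver name, limits, and the callback body are purely illustrative):

    static int example_get_hba_mac(struct ufs_hba *hba)
    {
        /* Report the real command limit when the capability register cannot be trusted. */
        return 64;
    }

    static const struct ufs_hba_variant_ops example_hba_vops = {
        .name        = "example-ufs",
        .max_num_rtt = 2,                  /* cap RTT below the device maximum */
        .get_hba_mac = example_get_hba_mac,
    };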
@@ -392,6 +402,9 @@ enum clk_gating_state {
* delay_ms
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
+ * relative to the host lock
* @state: the current clocks state
* @delay_ms: gating delay in ms
* @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -402,11 +415,14 @@ enum clk_gating_state {
* @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
*/
struct ufs_clk_gating {
struct delayed_work gate_work;
struct work_struct ungate_work;
+ struct workqueue_struct *clk_gating_workq;
+
+ spinlock_t lock;
+
enum clk_gating_state state;
unsigned long delay_ms;
bool is_suspended;
@@ -415,7 +431,6 @@ struct ufs_clk_gating {
bool is_enabled;
bool is_initialized;
int active_reqs;
- struct workqueue_struct *clk_gating_workq;
};
/**
@@ -432,6 +447,7 @@ struct ufs_clk_gating {
* @workq: workqueue to schedule devfreq suspend/resume work
* @suspend_work: worker to suspend devfreq
* @resume_work: worker to resume devfreq
+ * @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
* clkscale_enable sysfs node
@@ -451,12 +467,14 @@ struct ufs_clk_scaling {
struct workqueue_struct *workq;
struct work_struct suspend_work;
struct work_struct resume_work;
+ unsigned long target_freq;
u32 min_gear;
bool is_enabled;
bool is_allowed;
bool is_initialized;
bool is_busy_started;
bool is_suspended;
+ bool suspend_on_no_request;
};
#define UFS_EVENT_HIST_LENGTH 8
@@ -602,11 +620,6 @@ enum ufshcd_quirks {
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
/*
- * Align DMA SG entries on a 4 KiB boundary.
- */
- UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14,
-
- /*
* This quirk needs to be enabled if the host controller does not
* support UIC command
*/
@@ -642,6 +655,38 @@ enum ufshcd_quirks {
* thus need this quirk to skip related flow.
*/
UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
+
+ /*
+ * This quirk needs to be enabled if the host controller supports inline
+ * encryption but it needs to initialize the crypto capabilities in a
+ * nonstandard way and/or needs to override blk_crypto_ll_ops. If
+ * enabled, the standard code won't initialize the blk_crypto_profile;
+ * ufs_hba_variant_ops::init() must do it instead.
+ */
+ UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22,
+
+ /*
+ * This quirk needs to be enabled if the host controller supports inline
+ * encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e.
+ * host controller initialization fails if that bit is set.
+ */
+ UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23,
+
+ /*
+ * This quirk needs to be enabled if the host controller driver copies
+ * cryptographic keys into the PRDT in order to send them to hardware,
+ * and therefore the PRDT should be zeroized after each request (as per
+ * the standard best practice for managing keys).
+ */
+ UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24,
+
+ /*
+ * This quirk indicates that the controller reports the value 1 (not
+ * supported) in the Legacy Single DoorBell Support (LSDBS) bit of the
+ * Controller Capabilities register although it supports the legacy
+ * single doorbell mode.
+ */
+ UFSHCD_QUIRK_BROKEN_LSDBS_CAP = 1 << 25,
};
enum ufshcd_caps {
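A variant driver normally opts in to quirks such as these from its ->init() callback; a minimal, hypothetical example:

    static int example_hba_init(struct ufs_hba *hba)
    {
        /*
         * Controller advertises LSDBS = 1 even though legacy single
         * doorbell mode works, and keys are copied through the PRDT.
         */
        hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP |
                       UFSHCD_QUIRK_KEYS_IN_PRDT;
        return 0;
    }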
@@ -818,6 +863,7 @@ enum ufshcd_mcq_opr {
* @capabilities: UFS Controller Capabilities
* @mcq_capabilities: UFS Multi Circular Queue capabilities
* @nutrs: Transfer Request Queue depth supported by controller
+ * @nortt: Max outstanding RTTs supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
@@ -864,6 +910,7 @@ enum ufshcd_mcq_opr {
* @auto_bkops_enabled: to track whether bkops is enabled in device
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
+ * @use_pm_opp: Indicates whether OPP based scaling is used or not
* @req_abort_count: number of times ufshcd_abort() has been called
* @lanes_per_direction: number of lanes per data direction between the UFS
* controller and the UFS device.
@@ -910,6 +957,10 @@ enum ufshcd_mcq_opr {
* @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
* @dev_cmd_queue: Queue for issuing device management commands
+ * @mcq_opr: MCQ operation and runtime registers
+ * @ufs_rtc_update_work: A work for UFS RTC periodic update
+ * @pm_qos_req: PM QoS request handle
+ * @pm_qos_enabled: flag to check if pm qos is enabled
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -951,6 +1002,7 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
+ int nortt;
u32 mcq_capabilities;
int nutmrs;
u32 reserved_slot;
@@ -1014,6 +1066,7 @@ struct ufs_hba {
bool auto_bkops_enabled;
struct ufs_vreg_info vreg_info;
struct list_head clk_list_head;
+ bool use_pm_opp;
/* Number of requests aborts */
int req_abort_count;
@@ -1056,6 +1109,10 @@ struct ufs_hba {
struct delayed_work debugfs_ee_work;
u32 debugfs_ee_rate_limit_ms;
#endif
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+ struct fault_attr trigger_eh_attr;
+ struct fault_attr timeout_attr;
+#endif
u32 luns_avail;
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
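The two fault_attr instances follow the usual <linux/fault-inject.h> pattern: debugfs knobs decide when should_fail() returns true. A hedged sketch of how the core might consult one of them (the helper name is assumed, not taken from ufshcd.c):

    #ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
    static bool example_should_fake_timeout(struct ufs_hba *hba)
    {
        /* Roll the dice configured via debugfs for this fault_attr. */
        return should_fail(&hba->timeout_attr, 1);
    }
    #endif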
@@ -1070,6 +1127,10 @@ struct ufs_hba {
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+
+ struct delayed_work ufs_rtc_update_work;
+ struct pm_qos_request pm_qos_req;
+ bool pm_qos_enabled;
};
/**
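The RTC update is an ordinary delayed work item; hypothetically, arming it for the first time could look like the following (example_rtc_update_fn and the 10 s period are placeholders):

    INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, example_rtc_update_fn);
    schedule_delayed_work(&hba->ufs_rtc_update_work,
                          msecs_to_jiffies(10 * MSEC_PER_SEC));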
@@ -1112,10 +1173,7 @@ struct ufs_hw_queue {
struct mutex sq_mutex;
};
-static inline bool is_mcq_enabled(struct ufs_hba *hba)
-{
- return hba->mcq_enabled;
-}
+#define MCQ_QCFG_SIZE 0x40
static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
enum ufshcd_mcq_opr opr, int idx)
@@ -1123,6 +1181,11 @@ static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx;
}
+static inline unsigned int ufshcd_mcq_cfg_offset(unsigned int reg, int idx)
+{
+ return reg + MCQ_QCFG_SIZE * idx;
+}
+
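The new ufshcd_mcq_cfg_offset() helper simply strides a per-queue register block by MCQ_QCFG_SIZE (0x40 bytes). For example, reading a hypothetical per-queue configuration register at base offset reg for queue 3:

    u32 val = ufshcd_readl(hba, ufshcd_mcq_cfg_offset(reg, 3));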
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
@@ -1236,8 +1299,9 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
ufshcd_writel(hba, tmp, reg);
}
+void ufshcd_enable_irq(struct ufs_hba *hba);
+void ufshcd_disable_irq(struct ufs_hba *hba);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
@@ -1251,14 +1315,19 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
+void ufshcd_mcq_enable(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
+int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
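ufshcd_opp_config_clks() matches the OPP core's config_clks_t signature, so platform glue could, in principle, register it when setting up OPP-based scaling; a sketch under that assumption:

    struct dev_pm_opp_config opp_config = {
        .config_clks = ufshcd_opp_config_clks,   /* scale UFS clocks per OPP */
    };
    int ret = devm_pm_opp_set_config(dev, &opp_config);
    if (ret)
        return ret;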
/**
* ufshcd_set_variant - set variant specific data to the hba
* @hba: per adapter instance
@@ -1362,7 +1431,6 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups);
@@ -1376,8 +1444,6 @@ void ufshcd_release(struct ufs_hba *hba);
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
-
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
@@ -1392,6 +1458,8 @@ int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
bool ufshcd_is_hba_active(struct ufs_hba *hba);
+void ufshcd_pm_qos_init(struct ufs_hba *hba);
+void ufshcd_pm_qos_exit(struct ufs_hba *hba);
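The PM QoS declarations pair naturally with the CPU latency QoS API; a minimal sketch of what the init/exit helpers might do (the real implementation lives in ufshcd.c and may differ):

    void ufshcd_pm_qos_init(struct ufs_hba *hba)
    {
        if (hba->pm_qos_enabled)
            return;
        /* Start with no latency constraint; tighten it around I/O later. */
        cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);
        hba->pm_qos_enabled = true;
    }

    void ufshcd_pm_qos_exit(struct ufs_hba *hba)
    {
        if (!hba->pm_qos_enabled)
            return;
        cpu_latency_qos_remove_request(&hba->pm_qos_req);
        hba->pm_qos_enabled = false;
    }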
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)