Diffstat (limited to 'include/ufs/ufshcd.h')
-rw-r--r--  include/ufs/ufshcd.h  77
1 file changed, 51 insertions(+), 26 deletions(-)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 74e5b9960c54..1d3943777584 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -246,7 +246,7 @@ struct ufs_query {
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
- struct completion *complete;
+ struct completion complete;
struct ufs_query query;
};
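
This hunk embeds the device-management completion in struct ufs_dev_cmd instead of referencing an on-stack completion through a pointer. A minimal sketch of how an embedded completion is typically driven follows; only the dev_cmd.complete field comes from this patch, every helper name around it is hypothetical, and the real handling lives in drivers/ufs/core/ufshcd.c, which is not part of this diff.

/* illustrative only -- not the actual ufshcd.c code */
#include <linux/completion.h>
#include <linux/errno.h>

static void example_dev_cmd_init(struct ufs_hba *hba)
{
	init_completion(&hba->dev_cmd.complete);	/* once, at probe time */
}

static int example_dev_cmd_issue(struct ufs_hba *hba)
{
	reinit_completion(&hba->dev_cmd.complete);	/* before each command */
	/* ... send the device management command ... */
	if (!wait_for_completion_timeout(&hba->dev_cmd.complete,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	return 0;
}

static void example_dev_cmd_done(struct ufs_hba *hba)	/* e.g. from the ISR */
{
	complete(&hba->dev_cmd.complete);
}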
@@ -326,7 +326,6 @@ struct ufs_pwr_mode_info {
* @phy_initialization: used to initialize phys
* @device_reset: called to issue a reset pulse on the UFS device
* @config_scaling_param: called to configure clock scaling parameters
- * @program_key: program or evict an inline encryption key
* @fill_crypto_prdt: initialize crypto-related fields in the PRDT
* @event_notify: called to notify important events
* @mcq_config_resource: called to configure MCQ platform resources
@@ -337,6 +336,7 @@ struct ufs_pwr_mode_info {
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt
* @config_scsi_dev: called to configure SCSI device parameters
+ * @freq_to_gear_speed: called to map clock frequency to the max supported gear speed
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -345,8 +345,8 @@ struct ufs_hba_variant_ops {
void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*set_dma_mask)(struct ufs_hba *);
- int (*clk_scale_notify)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
+ int (*clk_scale_notify)(struct ufs_hba *, bool, unsigned long,
+ enum ufs_notify_change_status);
int (*setup_clocks)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
int (*hce_enable_notify)(struct ufs_hba *,
@@ -354,9 +354,9 @@ struct ufs_hba_variant_ops {
int (*link_startup_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
int (*pwr_change_notify)(struct ufs_hba *,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *desired_pwr_mode,
- struct ufs_pa_layer_attr *final_params);
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *desired_pwr_mode,
+ struct ufs_pa_layer_attr *final_params);
void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
bool is_scsi_cmd);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
@@ -373,8 +373,6 @@ struct ufs_hba_variant_ops {
void (*config_scaling_param)(struct ufs_hba *hba,
struct devfreq_dev_profile *profile,
struct devfreq_simple_ondemand_data *data);
- int (*program_key)(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot);
int (*fill_crypto_prdt)(struct ufs_hba *hba,
const struct bio_crypt_ctx *crypt_ctx,
void *prdt, unsigned int num_segments);
@@ -387,6 +385,7 @@ struct ufs_hba_variant_ops {
unsigned long *ocqs);
int (*config_esi)(struct ufs_hba *hba);
void (*config_scsi_dev)(struct scsi_device *sdev);
+ u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq);
};
/* clock gating state */
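
The variant-ops hunks above add a frequency argument to ->clk_scale_notify() and introduce ->freq_to_gear_speed(). A sketch of how a host driver might wire up the two hooks is shown below; the driver name, frequency thresholds, and chosen gears are invented for illustration and are not taken from any real platform driver.

static u32 example_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
	/* hypothetical mapping from controller clock rate to max HS gear */
	if (freq >= 300000000)
		return UFS_HS_G5;
	if (freq >= 200000000)
		return UFS_HS_G4;
	return UFS_HS_G3;
}

static int example_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    unsigned long target_freq,
				    enum ufs_notify_change_status status)
{
	if (status == PRE_CHANGE)
		return 0;
	/* POST_CHANGE: reprogram vendor-side dividers for target_freq */
	return 0;
}

static const struct ufs_hba_variant_ops example_vops = {
	.name			= "example",
	.clk_scale_notify	= example_clk_scale_notify,
	.freq_to_gear_speed	= example_freq_to_gear_speed,
};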
@@ -403,6 +402,9 @@ enum clk_gating_state {
* delay_ms
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
+ * relative to the host lock
* @state: the current clocks state
* @delay_ms: gating delay in ms
* @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -413,11 +415,14 @@ enum clk_gating_state {
* @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
*/
struct ufs_clk_gating {
struct delayed_work gate_work;
struct work_struct ungate_work;
+ struct workqueue_struct *clk_gating_workq;
+
+ spinlock_t lock;
+
enum clk_gating_state state;
unsigned long delay_ms;
bool is_suspended;
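
The new clk_gating.lock is documented above as an outer lock relative to the host lock, i.e. when both are needed it is taken first. A hypothetical illustration of that ordering (the function below is not part of the patch):

static void example_nested_locking(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(&hba->clk_gating.lock, flags);	/* outer lock */
	spin_lock(hba->host->host_lock);			/* inner lock */
	/* ... update state protected by both locks ... */
	spin_unlock(hba->host->host_lock);
	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}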
@@ -426,11 +431,14 @@ struct ufs_clk_gating {
bool is_enabled;
bool is_initialized;
int active_reqs;
- struct workqueue_struct *clk_gating_workq;
};
/**
* struct ufs_clk_scaling - UFS clock scaling related data
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @lock: serialize access to some struct ufs_clk_scaling members
* @active_reqs: number of requests that are pending. If this is zero when
* devfreq ->target() function is called then schedule "suspend_work" to
* suspend devfreq.
@@ -440,11 +448,10 @@ struct ufs_clk_gating {
* @enable_attr: sysfs attribute to enable/disable clock scaling
* @saved_pwr_info: UFS power mode may also be changed during scaling and this
* one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
* @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
+ * @wb_gear: enable Write Booster when HS gear scales above or equal to it, else
+ * disable Write Booster
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
* clkscale_enable sysfs node
* @is_allowed: tracks if scaling is currently allowed or not, used to block
@@ -454,17 +461,21 @@ struct ufs_clk_gating {
* @is_suspended: tracks if devfreq is suspended or not
*/
struct ufs_clk_scaling {
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+
+ spinlock_t lock;
+
int active_reqs;
unsigned long tot_busy_t;
ktime_t window_start_t;
ktime_t busy_start_t;
struct device_attribute enable_attr;
struct ufs_pa_layer_attr saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
unsigned long target_freq;
u32 min_gear;
+ u32 wb_gear;
bool is_enabled;
bool is_allowed;
bool is_initialized;
@@ -490,8 +501,6 @@ struct ufs_event_hist {
/**
* struct ufs_stats - keeps usage/err statistics
- * @last_intr_status: record the last interrupt status.
- * @last_intr_ts: record the last interrupt timestamp.
* @hibern8_exit_cnt: Counter to keep track of number of exits,
* reset this after link-startup.
* @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
@@ -499,9 +508,6 @@ struct ufs_event_hist {
* @event: array with event history.
*/
struct ufs_stats {
- u32 last_intr_status;
- u64 last_intr_ts;
-
u32 hibern8_exit_cnt;
u64 last_hibern8_exit_tstamp;
struct ufs_event_hist event[UFS_EVT_CNT];
@@ -946,9 +952,9 @@ enum ufshcd_mcq_opr {
* @nr_queues: number of Queues of different queue types
* @complete_put: whether or not to call ufshcd_rpm_put() from inside
* ufshcd_resume_complete()
- * @ext_iid_sup: is EXT_IID is supported by UFSHC
* @mcq_sup: is mcq supported by UFSHC
* @mcq_enabled: is mcq ready to accept requests
+ * @mcq_esi_enabled: is mcq ESI configured
* @res: array of resource info of MCQ registers
* @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
@@ -957,6 +963,10 @@ enum ufshcd_mcq_opr {
* @ufs_rtc_update_work: A work for UFS RTC periodic update
* @pm_qos_req: PM QoS request handle
* @pm_qos_enabled: flag to check if pm qos is enabled
+ * @critical_health_count: count of critical health exceptions
+ * @dev_lvl_exception_count: count of device level exceptions since last reset
+ * @dev_lvl_exception_id: vendor specific information about the
+ * device level exception event.
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -1112,11 +1122,11 @@ struct ufs_hba {
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
bool complete_put;
- bool ext_iid_sup;
bool scsi_host_added;
bool mcq_sup;
bool lsdb_sup;
bool mcq_enabled;
+ bool mcq_esi_enabled;
struct ufshcd_res_info res[RES_MAX];
void __iomem *mcq_base;
struct ufs_hw_queue *uhq;
@@ -1126,6 +1136,10 @@ struct ufs_hba {
struct delayed_work ufs_rtc_update_work;
struct pm_qos_request pm_qos_req;
bool pm_qos_enabled;
+
+ int critical_health_count;
+ atomic_t dev_lvl_exception_count;
+ u64 dev_lvl_exception_id;
};
/**
@@ -1202,6 +1216,14 @@ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
#endif
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+static inline struct ufs_hba *
+ufs_hba_from_crypto_profile(struct blk_crypto_profile *profile)
+{
+ return container_of(profile, struct ufs_hba, crypto_profile);
+}
+#endif
+
static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
{
return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
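
The removal of the ->program_key() vop earlier in this diff, together with the new ufs_hba_from_crypto_profile() helper, suggests crypto hooks operate on the blk_crypto_profile embedded in struct ufs_hba. A hedged sketch of recovering the hba inside a keyslot callback follows; the ops structure and function bodies are illustrative, not code from this patch.

#include <linux/blk-crypto-profile.h>

static int example_keyslot_program(struct blk_crypto_profile *profile,
				   const struct blk_crypto_key *key,
				   unsigned int slot)
{
	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);

	/* ... program @key into the controller's keyslot @slot ... */
	return 0;
}

static const struct blk_crypto_ll_ops example_crypto_ops = {
	.keyslot_program = example_keyslot_program,
	/* .keyslot_evict  = ..., */
};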
@@ -1297,7 +1319,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
void ufshcd_enable_irq(struct ufs_hba *hba);
void ufshcd_disable_irq(struct ufs_hba *hba);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
@@ -1357,6 +1378,8 @@ extern int ufshcd_system_thaw(struct device *dev);
extern int ufshcd_system_restore(struct device *dev);
#endif
+extern int ufshcd_dme_reset(struct ufs_hba *hba);
+extern int ufshcd_dme_enable(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
int adapt_val);
@@ -1414,7 +1437,7 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
-static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+static inline bool ufshcd_is_hs_mode(const struct ufs_pa_layer_attr *pwr_info)
{
return (pwr_info->pwr_rx == FAST_MODE ||
pwr_info->pwr_rx == FASTAUTO_MODE) &&
@@ -1450,12 +1473,14 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
struct scatterlist *sg_list, enum dma_data_direction dir);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
bool ufshcd_is_hba_active(struct ufs_hba *hba);
void ufshcd_pm_qos_init(struct ufs_hba *hba);
void ufshcd_pm_qos_exit(struct ufs_hba *hba);
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)