Diffstat (limited to 'drivers/nvme/target/nvmet.h')
-rw-r--r-- | drivers/nvme/target/nvmet.h | 67
1 file changed, 65 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 190f55e6d753..58328b35dc96 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -20,8 +20,9 @@
 #include <linux/blkdev.h>
 #include <linux/radix-tree.h>
 #include <linux/t10-pi.h>
+#include <linux/kfifo.h>
 
-#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)
+#define NVMET_DEFAULT_VS		NVME_VS(2, 1, 0)
 
 #define NVMET_ASYNC_EVENTS		4
 #define NVMET_ERROR_LOG_SLOTS		128
@@ -30,6 +31,7 @@
 #define NVMET_MN_MAX_SIZE		40
 #define NVMET_SN_MAX_SIZE		20
 #define NVMET_FR_MAX_SIZE		8
+#define NVMET_PR_LOG_QUEUE_SIZE		64
 
 /*
  * Supported optional AENs:
@@ -56,6 +58,38 @@
 #define IPO_IATTR_CONNECT_SQE(x)	\
 	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
 
+struct nvmet_pr_registrant {
+	u64			rkey;
+	uuid_t			hostid;
+	enum nvme_pr_type	rtype;
+	struct list_head	entry;
+	struct rcu_head		rcu;
+};
+
+struct nvmet_pr {
+	bool			enable;
+	unsigned long		notify_mask;
+	atomic_t		generation;
+	struct nvmet_pr_registrant __rcu *holder;
+	/*
+	 * During the execution of the reservation command, mutual
+	 * exclusion is required throughout the process. However,
+	 * while waiting asynchronously for the 'per controller
+	 * percpu_ref' to complete before the 'preempt and abort'
+	 * command finishes, a semaphore is needed to ensure mutual
+	 * exclusion instead of a mutex.
+	 */
+	struct semaphore	pr_sem;
+	struct list_head	registrant_list;
+};
+
+struct nvmet_pr_per_ctrl_ref {
+	struct percpu_ref	ref;
+	struct completion	free_done;
+	struct completion	confirm_done;
+	uuid_t			hostid;
+};
+
 struct nvmet_ns {
 	struct percpu_ref	ref;
 	struct file		*bdev_file;
@@ -85,6 +119,8 @@ struct nvmet_ns {
 	int			pi_type;
 	int			metadata_size;
 	u8			csi;
+	struct nvmet_pr		pr;
+	struct xarray		pr_per_ctrl_refs;
 };
 
 static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -191,6 +227,13 @@ static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
 	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
 }
 
+struct nvmet_pr_log_mgr {
+	struct mutex		lock;
+	u64			lost_count;
+	u64			counter;
+	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
+};
+
 struct nvmet_ctrl {
 	struct nvmet_subsys	*subsys;
 	struct nvmet_sq		**sqs;
@@ -246,6 +289,7 @@ struct nvmet_ctrl {
 	u8			*dh_key;
 	size_t			dh_keysize;
 #endif
+	struct nvmet_pr_log_mgr pr_log_mgr;
 };
 
 struct nvmet_subsys {
@@ -396,6 +440,9 @@ struct nvmet_req {
 			struct work_struct	zmgmt_work;
 		} z;
 #endif /* CONFIG_BLK_DEV_ZONED */
+		struct {
+			struct work_struct	abort_work;
+		} r;
 	};
 	int			sg_cnt;
 	int			metadata_sg_cnt;
@@ -412,6 +459,7 @@ struct nvmet_req {
 	struct device		*p2p_client;
 	u16			error_loc;
 	u64			error_slba;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
 };
 
 #define NVMET_MAX_MPOOL_BVEC	16
@@ -498,7 +546,8 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
 
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
-		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
+		uuid_t *hostid);
 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
 				       const char *hostnqn, u16 cntlid,
 				       struct nvmet_req *req);
@@ -761,4 +810,18 @@ static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
 static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
 #endif
 
+int nvmet_pr_init_ns(struct nvmet_ns *ns);
+u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
+u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
+void nvmet_pr_exit_ns(struct nvmet_ns *ns);
+void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
+u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
+u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
+static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
+{
+	percpu_ref_put(&pc_ref->ref);
+}
 #endif /* _NVMET_H */
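
Note: the declarations added at the bottom of nvmet.h suggest how a command path is expected to pin the per-controller reference while the reservation rules are evaluated. The snippet below is an illustrative sketch only, not part of the patch: the wrapper name example_pr_guarded_execute() is hypothetical; it simply pairs nvmet_pr_get_ns_pc_ref()/nvmet_pr_put_ns_pc_ref() around nvmet_pr_check_cmd_access(), using the req->pc_ref field introduced above.

/* Illustrative sketch only -- not taken from the patch. */
static u16 example_pr_guarded_execute(struct nvmet_req *req)
{
	u16 status;

	/* Pin this controller's entry in ns->pr_per_ctrl_refs; on success it is expected in req->pc_ref. */
	status = nvmet_pr_get_ns_pc_ref(req);
	if (status)
		return status;

	/* Fail the command if the current reservation holder does not permit it. */
	status = nvmet_pr_check_cmd_access(req);
	if (status)
		goto out_put;

	/* ... issue the actual I/O here ... */

out_put:
	/* Drop the percpu_ref taken above. */
	nvmet_pr_put_ns_pc_ref(req->pc_ref);
	return status;
}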