 drivers/nvme/target/admin-cmd.c |   9
 drivers/nvme/target/configfs.c  |  12
 drivers/nvme/target/core.c      | 108
 drivers/nvme/target/nvmet.h     |   7
 drivers/nvme/target/pr.c        |   8
 5 files changed, 79 insertions(+), 65 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2962794ce881..fa89b0549c36 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -139,7 +139,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
unsigned long idx;
ctrl = req->sq->ctrl;
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
@@ -331,9 +331,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
- xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
+ }
}
desc->grpid = cpu_to_le32(grpid);
@@ -772,7 +773,7 @@ static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
goto out;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_endgid)
continue;
@@ -815,7 +816,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
goto out;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
if (match_css && req->ns->csi != req->cmd->identify.csi)
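Illustrative sketch, not part of the patch: the admin handlers above no longer have to filter out disabled namespaces by hand, because a marked xarray walk simply never visits unmarked entries. The demo_* names and DEMO_NS_ENABLED below are hypothetical stand-ins for the nvmet helpers introduced in nvmet.h further down; the shape of the loop follows the identify-nslist hunk.

#include <linux/types.h>
#include <linux/xarray.h>
#include <asm/byteorder.h>

#define DEMO_NS_ENABLED	XA_MARK_1

struct demo_ns {
	u32 nsid;
};

static u32 demo_collect_enabled_nsids(struct xarray *namespaces, __le32 *list,
				      u32 max_entries, u32 min_nsid)
{
	struct demo_ns *ns;
	unsigned long idx;
	u32 count = 0;

	/* disabled entries stay in the xarray but carry no mark,
	 * so the walk skips them without an explicit check in the body */
	xa_for_each_marked(namespaces, idx, ns, DEMO_NS_ENABLED) {
		if (ns->nsid <= min_nsid)
			continue;
		list[count++] = cpu_to_le32(ns->nsid);
		if (count == max_entries)
			break;
	}
	return count;
}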
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 9c109b93ffbf..2b030f0efc38 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -810,18 +810,6 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
-{
- struct config_item *ns_item;
- char name[12];
-
- snprintf(name, sizeof(name), "%u", nsid);
- mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
- ns_item = config_group_find_item(&subsys->namespaces_group, name);
- mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
- return ns_item != NULL;
-}
-
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1f4e9989663b..fde6c555af61 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
unsigned long idx;
u32 nsid = 0;
- xa_for_each(&subsys->namespaces, idx, cur)
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
nsid = cur->nsid;
return nsid;
@@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
req->ns = xa_load(&subsys->namespaces, nsid);
- if (unlikely(!req->ns)) {
+ if (unlikely(!req->ns || !req->ns->enabled)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
- if (nvmet_subsys_nsid_exists(subsys, nsid))
- return NVME_SC_INTERNAL_PATH_ERROR;
- return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+ if (!req->ns) /* ns doesn't exist! */
+ return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+
+ /* ns exists but it's disabled */
+ req->ns = NULL;
+ return NVME_SC_INTERNAL_PATH_ERROR;
}
percpu_ref_get(&req->ns->ref);
@@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;
ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
@@ -599,38 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
- ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
- 0, GFP_KERNEL);
- if (ret)
- goto out_dev_put;
-
- if (ns->nsid > subsys->max_nsid)
- subsys->max_nsid = ns->nsid;
-
- ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
- if (ret)
- goto out_restore_subsys_maxnsid;
-
if (ns->pr.enable) {
ret = nvmet_pr_init_ns(ns);
if (ret)
- goto out_remove_from_subsys;
+ goto out_dev_put;
}
- subsys->nr_namespaces++;
-
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
+ xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
-
-out_remove_from_subsys:
- xa_erase(&subsys->namespaces, ns->nsid);
-out_restore_subsys_maxnsid:
- subsys->max_nsid = nvmet_max_nsid(subsys);
- percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -649,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
goto out_unlock;
ns->enabled = false;
- xa_erase(&ns->subsys->namespaces, ns->nsid);
- if (ns->nsid == subsys->max_nsid)
- subsys->max_nsid = nvmet_max_nsid(subsys);
+ xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
mutex_unlock(&subsys->lock);
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
+
+ mutex_lock(&subsys->lock);
+ nvmet_ns_changed(subsys, ns->nsid);
+ nvmet_ns_dev_disable(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+
+ nvmet_ns_disable(ns);
+
+ mutex_lock(&subsys->lock);
+
+ xa_erase(&subsys->namespaces, ns->nsid);
+ if (ns->nsid == subsys->max_nsid)
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ mutex_unlock(&subsys->lock);
+
/*
* Now that we removed the namespaces from the lookup list, we
* can kill the per_cpu ref and wait for any remaining references
@@ -671,21 +675,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
wait_for_completion(&ns->disable_done);
percpu_ref_exit(&ns->ref);
- if (ns->pr.enable)
- nvmet_pr_exit_ns(ns);
-
mutex_lock(&subsys->lock);
-
subsys->nr_namespaces--;
- nvmet_ns_changed(subsys, ns->nsid);
- nvmet_ns_dev_disable(ns);
-out_unlock:
mutex_unlock(&subsys->lock);
-}
-
-void nvmet_ns_free(struct nvmet_ns *ns)
-{
- nvmet_ns_disable(ns);
down_write(&nvmet_ana_sem);
nvmet_ana_group_enabled[ns->anagrpid]--;
@@ -699,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ns *ns;
+ mutex_lock(&subsys->lock);
+
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (!ns)
- return NULL;
+ goto out_unlock;
init_completion(&ns->disable_done);
ns->nsid = nsid;
ns->subsys = subsys;
+ if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+ goto out_free;
+
+ if (ns->nsid > subsys->max_nsid)
+ subsys->max_nsid = nsid;
+
+ if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
+ goto out_exit;
+
+ subsys->nr_namespaces++;
+
+ mutex_unlock(&subsys->lock);
+
down_write(&nvmet_ana_sem);
ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
nvmet_ana_group_enabled[ns->anagrpid]++;
@@ -718,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->csi = NVME_CSI_NVM;
return ns;
+out_exit:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
+out_free:
+ kfree(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return NULL;
}
static void nvmet_update_sq_head(struct nvmet_req *req)
@@ -1394,7 +1412,7 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
ctrl->p2p_client = get_device(req->p2p_client);
- xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
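Illustrative sketch, not part of the patch: since namespaces now stay in the xarray while disabled, the request lookup can distinguish "never configured" from "configured but disabled". The demo_* names are hypothetical, and the mark test stands in for the ns->enabled check used by the real nvmet_req_find_ns().

#include <linux/nvme.h>
#include <linux/xarray.h>

#define DEMO_NS_ENABLED	XA_MARK_1

struct demo_ns {
	u32 nsid;
};

static u16 demo_ns_lookup_status(struct xarray *namespaces, u32 nsid)
{
	struct demo_ns *ns = xa_load(namespaces, nsid);

	if (!ns)		/* nsid was never configured */
		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;

	if (!xa_get_mark(namespaces, nsid, DEMO_NS_ENABLED))
		return NVME_SC_INTERNAL_PATH_ERROR;	/* present but disabled */

	return NVME_SC_SUCCESS;
}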
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 58328b35dc96..7233549f7c8a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -24,6 +24,7 @@
#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0)
+#define NVMET_NS_ENABLED XA_MARK_1
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
@@ -33,6 +34,12 @@
#define NVMET_FR_MAX_SIZE 8
#define NVMET_PR_LOG_QUEUE_SIZE 64
+#define nvmet_for_each_ns(xa, index, entry) \
+ xa_for_each(xa, index, entry)
+
+#define nvmet_for_each_enabled_ns(xa, index, entry) \
+ xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
+
/*
* Supported optional AENs:
*/
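Illustrative sketch, not part of the patch: the two iteration helpers above are thin wrappers around xarray marks, so an entry can remain in the array across enable/disable cycles while only marked entries appear in the enabled-only walk. The demo_* names below are hypothetical stand-ins for the nvmet structures.

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/xarray.h>

#define DEMO_NS_ENABLED	XA_MARK_1	/* plays the role of NVMET_NS_ENABLED */

struct demo_ns {
	u32 nsid;
};

static DEFINE_XARRAY(demo_namespaces);

/* allocation: the entry becomes visible to plain lookups, but carries no mark */
static int demo_ns_alloc(struct demo_ns *ns)
{
	return xa_insert(&demo_namespaces, ns->nsid, ns, GFP_KERNEL);
}

/* enable/disable only toggle the mark; the entry itself stays in place */
static void demo_ns_enable(struct demo_ns *ns)
{
	xa_set_mark(&demo_namespaces, ns->nsid, DEMO_NS_ENABLED);
}

static void demo_ns_disable(struct demo_ns *ns)
{
	xa_clear_mark(&demo_namespaces, ns->nsid, DEMO_NS_ENABLED);
}

/* the marked iteration visits enabled entries only */
static void demo_ns_walk_enabled(void)
{
	struct demo_ns *ns;
	unsigned long idx;

	xa_for_each_marked(&demo_namespaces, idx, ns, DEMO_NS_ENABLED)
		pr_info("enabled nsid %u\n", ns->nsid);
}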
diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c
index 90e9f5bbe581..cd22d8333314 100644
--- a/drivers/nvme/target/pr.c
+++ b/drivers/nvme/target/pr.c
@@ -60,7 +60,7 @@ u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
goto success;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->pr.enable)
WRITE_ONCE(ns->pr.notify_mask, mask);
}
@@ -1056,7 +1056,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
* nvmet_pr_init_ns(), see more details in nvmet_ns_enable().
* So just check ns->pr.enable.
*/
- xa_for_each(&subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
&ctrl->hostid);
@@ -1067,7 +1067,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
return 0;
free_per_ctrl_refs:
- xa_for_each(&subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
if (pc_ref)
@@ -1087,7 +1087,7 @@ void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
kfifo_free(&ctrl->pr_log_mgr.log_queue);
mutex_destroy(&ctrl->pr_log_mgr.lock);
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
if (pc_ref)