Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/Kconfig                         |   5
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c   |   6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c      |   2
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c        |   3
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c       |  55
-rw-r--r--  drivers/target/loopback/Kconfig                |   1
-rw-r--r--  drivers/target/loopback/tcm_loop.c             |  15
-rw-r--r--  drivers/target/sbp/sbp_target.c                |  18
-rw-r--r--  drivers/target/target_core_configfs.c          |  12
-rw-r--r--  drivers/target/target_core_device.c            |  53
-rw-r--r--  drivers/target/target_core_fabric_configfs.c   |   5
-rw-r--r--  drivers/target/target_core_internal.h          |   2
-rw-r--r--  drivers/target/target_core_sbc.c               |   7
-rw-r--r--  drivers/target/target_core_tmr.c               |  30
-rw-r--r--  drivers/target/target_core_transport.c         | 285
-rw-r--r--  drivers/target/target_core_ua.c                |  43
-rw-r--r--  drivers/target/target_core_ua.h                |   3
-rw-r--r--  drivers/target/target_core_user.c              | 377
-rw-r--r--  drivers/target/target_core_xcopy.c             |   5
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c                |  10
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c               |   5
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c               |   5
22 files changed, 512 insertions, 435 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 4c44d7bed01a..cb6f32ce7de8 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -1,10 +1,10 @@
menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
- depends on SCSI && BLOCK
+ depends on BLOCK
select CONFIGFS_FS
select CRC_T10DIF
- select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select BLK_SCSI_REQUEST
select SGL_ALLOC
default n
help
@@ -29,6 +29,7 @@ config TCM_FILEIO
config TCM_PSCSI
tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+ depends on SCSI
help
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0ebc4818e132..95d0a22b2ad6 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1090,10 +1090,8 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
/* Start items for lio_target_tiqn_cit */
-static struct se_portal_group *lio_target_tiqn_addtpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn,
+ const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
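Throughout this series the unused struct config_group * argument is dropped from the ->fabric_make_tpg() callback; the same two-line signature change repeats below for tcm_loop, sbp and the generic configfs caller. Assuming the callback declaration lives in target_core_fabric.h as in mainline, it shrinks to:

    struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *wwn,
                                               const char *name);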
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 99501785cdc1..923b1a9fc3dc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -369,7 +369,7 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 4b34f71547c6..101d62105c93 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -636,8 +636,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
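Note the bound in the strncat() calls removed above: strlen() of the *source* is exactly the number of bytes strncat() was going to append anyway, so it protected buf1 from nothing. strlcat() bounds against the destination size and truncates instead of overflowing, and the two string literals can be pasted together by the preprocessor. A minimal sketch of the difference, assuming a fixed-size buffer like the one in iscsit_ta_authentication():

    char buf1[256];

    /* Before: the limit equals the bytes to be appended -- a no-op as
     * a bound, so buf1 could still overflow. */
    strncat(buf1, ",", strlen(","));
    strncat(buf1, NONE, strlen(NONE));

    /* After: the limit is the total destination size; the result is
     * truncated rather than overrunning buf1. */
    strlcat(buf1, "," NONE, sizeof(buf1));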
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 4435bf374d2d..49be1e41290c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -17,7 +17,7 @@
******************************************************************************/
#include <linux/list.h>
-#include <linux/percpu_ida.h>
+#include <linux/sched/signal.h>
#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
@@ -147,6 +147,30 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->r2t_lock);
}
+static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
+{
+ int tag = -1;
+ DEFINE_WAIT(wait);
+ struct sbq_wait_state *ws;
+
+ if (state == TASK_RUNNING)
+ return tag;
+
+ ws = &se_sess->sess_tag_pool.ws[0];
+ for (;;) {
+ prepare_to_wait_exclusive(&ws->wait, &wait, state);
+ if (signal_pending_state(state, current))
+ break;
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ finish_wait(&ws->wait, &wait);
+ return tag;
+}
+
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
@@ -155,9 +179,11 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess;
- int size, tag;
+ int size, tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ tag = iscsit_wait_for_tag(se_sess, state, &cpu);
if (tag < 0)
return NULL;
@@ -166,6 +192,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
memset(cmd, 0, size);
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->conn = conn;
cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
@@ -711,7 +738,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(sess->se_sess, se_cmd);
}
EXPORT_SYMBOL(iscsit_release_cmd);
@@ -1026,26 +1053,8 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
- /*
- * NOPIN timeout is disabled..
- */
- if (!na->nopin_timeout)
- return;
-
spin_lock_bh(&conn->nopin_timer_lock);
- if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
- spin_unlock_bh(&conn->nopin_timer_lock);
- return;
- }
-
- conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
- conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
-
- pr_debug("Started NOPIN Timer on CID: %d at %u second"
- " interval\n", conn->cid, na->nopin_timeout);
+ __iscsit_start_nopin_timer(conn);
spin_unlock_bh(&conn->nopin_timer_lock);
}
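percpu_ida_alloc() could sleep internally when the pool ran dry; sbitmap_queue_get() never sleeps, which is why every converted caller either accepts failure (TASK_RUNNING) or open-codes a wait loop like iscsit_wait_for_tag() above, and why the CPU hint returned at allocation time is now stored in se_cmd. The free side is the new target_free_tag() helper; assuming it matches mainline's target_core_fabric.h, it is a one-line wrapper:

    static inline void target_free_tag(struct se_session *sess,
                                       struct se_cmd *cmd)
    {
            /* Return map_tag to the per-CPU cache recorded in map_cpu
             * at allocation time, keeping the tag cache-local. */
            sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag,
                                cmd->map_cpu);
    }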
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index abe8ecbcdf06..158ee9d522f7 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -1,5 +1,6 @@
config LOOPBACK_TARGET
tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
+ depends on SCSI
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 60d5b918c4ac..bc8918f382e4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -239,10 +239,7 @@ out:
return ret;
release:
- if (se_cmd)
- transport_generic_free_cmd(se_cmd, 0);
- else
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
goto out;
}
@@ -768,7 +765,7 @@ static int tcm_loop_make_nexus(
if (!tl_nexus)
return -ENOMEM;
- tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+ tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
@@ -808,7 +805,7 @@ static int tcm_loop_drop_nexus(
/*
* Release the SCSI I_T Nexus to the emulated Target Port
*/
- transport_deregister_session(tl_nexus->se_sess);
+ target_remove_session(se_sess);
tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
@@ -983,10 +980,8 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
/* Start items for tcm_loop_naa_cit */
-static struct se_portal_group *tcm_loop_make_naa_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
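Alongside the target_alloc_session() -> target_setup_session() rename, the two-step session teardown that every fabric driver had to remember is folded into target_remove_session() (defined in the target_core_transport.c hunk further down). From a fabric driver's point of view:

    /* Old teardown, easy to get half right: */
    transport_deregister_session_configfs(se_sess);
    transport_deregister_session(se_sess);

    /* New teardown, one call: */
    target_remove_session(se_sess);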
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index fb1003921d85..3d10189ecedc 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -209,7 +209,7 @@ static struct sbp_session *sbp_session_create(
INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
sess->guid = guid;
- sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+ sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
sizeof(struct sbp_target_request),
TARGET_PROT_NORMAL, guid_str,
sess, NULL);
@@ -235,8 +235,7 @@ static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
if (cancel_work)
cancel_delayed_work_sync(&sess->maint_work);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
if (sess->card)
fw_card_put(sess->card);
@@ -926,15 +925,16 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
{
struct se_session *se_sess = sess->se_sess;
struct sbp_target_request *req;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
return ERR_PTR(-ENOMEM);
req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
req->se_cmd.tag = next_orb;
return req;
@@ -1460,7 +1460,7 @@ static void sbp_free_request(struct sbp_target_request *req)
kfree(req->pg_tbl);
kfree(req->cmd_buf);
- percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+ target_free_tag(se_sess, se_cmd);
}
static void sbp_mgt_agent_process(struct work_struct *work)
@@ -2005,10 +2005,8 @@ static void sbp_pre_unlink_lun(
pr_err("unlink LUN: failed to update unit directory\n");
}
-static struct se_portal_group *sbp_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 5ccef7d597fa..f6b1549f4142 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -263,8 +263,8 @@ static struct config_group *target_core_register_fabric(
&tf->tf_discovery_cit);
configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
- pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
- " %s\n", tf->tf_group.cg_item.ci_name);
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
+ config_item_name(&tf->tf_group.cg_item));
return &tf->tf_group;
}
@@ -810,7 +810,7 @@ static ssize_t pi_prot_type_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
@@ -859,7 +859,7 @@ static ssize_t pi_prot_format_store(struct config_item *item,
dev->transport->name);
return -ENOSYS;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
@@ -1948,7 +1948,7 @@ static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
struct se_device *dev = to_device(item);
- return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+ return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
static ssize_t target_dev_enable_store(struct config_item *item,
@@ -2473,7 +2473,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("Unable to set alua_access_state while device is"
" not configured\n");
return -ENODEV;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e27db4d45a9d..47b5ef153135 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -336,7 +336,6 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
- atomic_set(&new->ua_count, 0);
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -879,39 +878,21 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
}
EXPORT_SYMBOL(target_to_linux_sector);
-/**
- * target_find_device - find a se_device by its dev_index
- * @id: dev_index
- * @do_depend: true if caller needs target_depend_item to be done
- *
- * If do_depend is true, the caller must do a target_undepend_item
- * when finished using the device.
- *
- * If do_depend is false, the caller must be called in a configfs
- * callback or during removal.
- */
-struct se_device *target_find_device(int id, bool do_depend)
-{
- struct se_device *dev;
-
- mutex_lock(&device_mutex);
- dev = idr_find(&devices_idr, id);
- if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
- dev = NULL;
- mutex_unlock(&device_mutex);
- return dev;
-}
-EXPORT_SYMBOL(target_find_device);
-
struct devices_idr_iter {
+ struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
static int target_devices_idr_iter(int id, void *p, void *data)
+ __must_hold(&device_mutex)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ int ret;
+
+ config_item_put(iter->prev_item);
+ iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
@@ -919,10 +900,18 @@ static int target_devices_idr_iter(int id, void *p, void *data)
* to allow other callers to access partially setup devices,
* so we skip them here.
*/
- if (!(dev->dev_flags & DF_CONFIGURED))
+ if (!target_dev_configured(dev))
+ return 0;
+
+ iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!iter->prev_item)
return 0;
+ mutex_unlock(&device_mutex);
+
+ ret = iter->fn(dev, iter->data);
- return iter->fn(dev, iter->data);
+ mutex_lock(&device_mutex);
+ return ret;
}
/**
@@ -936,15 +925,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data)
{
- struct devices_idr_iter iter;
+ struct devices_idr_iter iter = { .fn = fn, .data = data };
int ret;
- iter.fn = fn;
- iter.data = data;
-
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
+ config_item_put(iter.prev_item);
return ret;
}
@@ -953,7 +940,7 @@ int target_configure_device(struct se_device *dev)
struct se_hba *hba = dev->se_hba;
int ret, id;
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
@@ -1058,7 +1045,7 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
destroy_workqueue(dev->tmr_wq);
dev->transport->destroy_device(dev);
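The rewritten target_devices_idr_iter() no longer holds device_mutex across the callback. Stripped of the +/- interleaving, the net result per iteration is: pin the device's config_item while the mutex is held, drop the mutex for the (possibly sleeping) callback, retake it, and defer the config_item_put() to the next iteration or to target_for_each_device() itself, so the idr entry the walk resumes from cannot be freed underneath it:

    /* Callback body; device_mutex held on entry and on exit: */
    iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
    if (!iter->prev_item)
            return 0;               /* device already being released */
    mutex_unlock(&device_mutex);
    ret = iter->fn(dev, iter->data);    /* reference keeps dev alive */
    mutex_lock(&device_mutex);
    return ret;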
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e1416b007aa4..aa2f4f632ebe 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -34,6 +34,7 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
@@ -642,7 +643,7 @@ static int target_fabric_port_link(
}
dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("se_device not configured yet, cannot port link\n");
return -ENODEV;
}
@@ -841,7 +842,7 @@ static struct config_group *target_fabric_make_tpg(
return ERR_PTR(-ENOSYS);
}
- se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
+ se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dead30b1d32c..0c6635587930 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,7 @@ int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-int transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index b054682e974f..ebac2b49b9c6 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -978,9 +978,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
case COMPARE_AND_WRITE:
if (!dev->dev_attrib.emulate_caw) {
- pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
- " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
- dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
+ pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
+ dev->se_hba->backend->ops->name,
+ config_item_name(&dev->dev_group.cg_item),
+ dev->t10_wwn.unit_serial);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
sectors = cdb[13];
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 9c7bc1ca341a..6d1179a7f043 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,25 +75,6 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
kfree(tmr);
}
-static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
-{
- unsigned long flags;
- bool remove = true, send_tas;
- /*
- * TASK ABORTED status (TAS) bit support
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- send_tas = (cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- if (send_tas) {
- remove = false;
- transport_send_task_abort(cmd);
- }
-
- return transport_cmd_finish_abort(cmd, remove);
-}
-
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
@@ -142,7 +123,7 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
return false;
}
}
- if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
@@ -187,13 +168,12 @@ void core_tmr_abort_task(
if (!__target_check_io_state(se_cmd, se_sess, 0))
continue;
- list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- if (!transport_cmd_finish_abort(se_cmd, true))
+ if (!transport_cmd_finish_abort(se_cmd))
target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
@@ -259,7 +239,7 @@ static void core_tmr_drain_tmr_list(
spin_unlock(&sess->sess_cmd_lock);
continue;
}
- if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
spin_unlock(&cmd->t_state_lock);
spin_unlock(&sess->sess_cmd_lock);
continue;
@@ -291,7 +271,7 @@ static void core_tmr_drain_tmr_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!transport_cmd_finish_abort(cmd, 1))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
@@ -380,7 +360,7 @@ static void core_tmr_drain_state_list(
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- if (!core_tmr_handle_tas_abort(cmd, tas))
+ if (!transport_cmd_finish_abort(cmd))
target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ee5081ba5313..86c0156e6c88 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,7 +64,7 @@ struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
@@ -224,7 +224,27 @@ void transport_subsystem_check_init(void)
sub_api_initialized = 1;
}
-struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session - initialize a session object
+ * @se_sess: Session object pointer.
+ *
+ * The caller must have zero-initialized @se_sess before calling this function.
+ */
+void transport_init_session(struct se_session *se_sess)
+{
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
+ init_waitqueue_head(&se_sess->cmd_list_wq);
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/**
+ * transport_alloc_session - allocate a session object and initialize it
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -234,17 +254,20 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&se_sess->sess_list);
- INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
- spin_lock_init(&se_sess->sess_cmd_lock);
+ transport_init_session(se_sess);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session);
+EXPORT_SYMBOL(transport_alloc_session);
+/**
+ * transport_alloc_session_tags - allocate target driver private data
+ * @se_sess: Session pointer.
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ */
int transport_alloc_session_tags(struct se_session *se_sess,
unsigned int tag_num, unsigned int tag_size)
{
@@ -260,7 +283,8 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
}
- rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
+ rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
+ false, GFP_KERNEL, NUMA_NO_NODE);
if (rc < 0) {
pr_err("Unable to init se_sess->sess_tag_pool,"
" tag_num: %u\n", tag_num);
@@ -273,9 +297,16 @@ int transport_alloc_session_tags(struct se_session *se_sess,
}
EXPORT_SYMBOL(transport_alloc_session_tags);
-struct se_session *transport_init_session_tags(unsigned int tag_num,
- unsigned int tag_size,
- enum target_prot_op sup_prot_ops)
+/**
+ * transport_init_session_tags - allocate a session and target driver private data
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+static struct se_session *
+transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
int rc;
@@ -291,7 +322,7 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return ERR_PTR(-EINVAL);
}
- se_sess = transport_init_session(sup_prot_ops);
+ se_sess = transport_alloc_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
@@ -303,7 +334,6 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session_tags);
/*
* Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
@@ -316,6 +346,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -352,7 +383,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -361,7 +392,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
@@ -385,7 +416,7 @@ void transport_register_session(
EXPORT_SYMBOL(transport_register_session);
struct se_session *
-target_alloc_session(struct se_portal_group *tpg,
+target_setup_session(struct se_portal_group *tpg,
unsigned int tag_num, unsigned int tag_size,
enum target_prot_op prot_op,
const char *initiatorname, void *private,
@@ -401,7 +432,7 @@ target_alloc_session(struct se_portal_group *tpg,
if (tag_num != 0)
sess = transport_init_session_tags(tag_num, tag_size, prot_op);
else
- sess = transport_init_session(prot_op);
+ sess = transport_alloc_session(prot_op);
if (IS_ERR(sess))
return sess;
@@ -427,7 +458,7 @@ target_alloc_session(struct se_portal_group *tpg,
transport_register_session(tpg, sess->se_node_acl, sess, private);
return sess;
}
-EXPORT_SYMBOL(target_alloc_session);
+EXPORT_SYMBOL(target_setup_session);
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
@@ -547,7 +578,7 @@ void transport_free_session(struct se_session *se_sess)
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
- percpu_ida_destroy(&se_sess->sess_tag_pool);
+ sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
}
kmem_cache_free(se_sess_cache, se_sess);
@@ -585,6 +616,13 @@ void transport_deregister_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_deregister_session);
+void target_remove_session(struct se_session *se_sess)
+{
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
+}
+EXPORT_SYMBOL(target_remove_session);
+
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -601,6 +639,13 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * This function is called by the target core after the target core has
+ * finished processing a SCSI command or SCSI TMF. Both the regular command
+ * processing code and the code for aborting commands can call this
+ * function. CMD_T_STOP is set if and only if another thread is waiting
+ * inside transport_wait_for_tasks() for t_transport_stop_comp.
+ */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
@@ -650,23 +695,27 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
percpu_ref_put(&lun->lun_ref);
}
-int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd)
{
+ bool send_tas = cmd->transport_state & CMD_T_TAS;
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
int ret = 0;
+ if (send_tas)
+ transport_send_task_abort(cmd);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
* Allow the fabric driver to unmap any resources before
* releasing the descriptor via TFO->release_cmd()
*/
- if (remove)
+ if (!send_tas)
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
- if (remove && ack_kref)
+ if (!send_tas && ack_kref)
ret = target_put_sess_cmd(cmd);
return ret;
@@ -1267,7 +1316,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
- init_completion(&cmd->cmd_wait_comp);
+ cmd->compl = NULL;
spin_lock_init(&cmd->t_state_lock);
INIT_WORK(&cmd->work, NULL);
kref_init(&cmd->cmd_kref);
@@ -2079,9 +2128,6 @@ static void transport_complete_qf(struct se_cmd *cmd)
if (cmd->scsi_status)
goto queue_status;
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
goto queue_status;
}
@@ -2593,20 +2639,37 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+/*
+ * This function is called by frontend drivers after processing of a command
+ * has finished.
+ *
+ * The protocol for ensuring that either the regular flow or the TMF
+ * code drops one reference is as follows:
+ * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
+ * the frontend driver to drop one reference, synchronously or asynchronously.
+ * - During regular command processing the target core sets CMD_T_COMPLETE
+ * before invoking one of the .queue_*() functions.
+ * - The code that aborts commands skips commands and TMFs for which
+ * CMD_T_COMPLETE has been set.
+ * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
+ * commands that will be aborted.
+ * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
+ * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
+ * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
+ * be called and will drop a reference.
+ * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
+ * will be called. transport_cmd_finish_abort() will drop the final reference.
+ */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ DECLARE_COMPLETION_ONSTACK(compl);
int ret = 0;
bool aborted = false, tas = false;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
- if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (wait_for_tasks)
+ target_wait_free_cmd(cmd, &aborted, &tas);
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
- } else {
- if (wait_for_tasks)
- target_wait_free_cmd(cmd, &aborted, &tas);
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
@@ -2617,20 +2680,14 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
-
- if (!aborted || tas)
- ret = target_put_sess_cmd(cmd);
}
- /*
- * If the task has been internally aborted due to TMR ABORT_TASK
- * or LUN_RESET, target_core_tmr.c is responsible for performing
- * the remaining calls to target_put_sess_cmd(), and not the
- * callers of this function.
- */
+ if (aborted)
+ cmd->compl = &compl;
+ if (!aborted || tas)
+ ret = target_put_sess_cmd(cmd);
if (aborted) {
pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
- wait_for_completion(&cmd->cmd_wait_comp);
- cmd->se_tfo->release_cmd(cmd);
+ wait_for_completion(&compl);
ret = 1;
}
return ret;
@@ -2691,30 +2748,21 @@ static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
+ struct completion *compl = se_cmd->compl;
unsigned long flags;
- bool fabric_stop;
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
- (se_cmd->transport_state & CMD_T_ABORTED);
- spin_unlock(&se_cmd->t_state_lock);
-
- if (se_cmd->cmd_wait_set || fabric_stop) {
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- complete(&se_cmd->cmd_wait_comp);
- return;
- }
list_del_init(&se_cmd->se_cmd_list);
+ if (list_empty(&se_sess->sess_cmd_list))
+ wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
+ if (compl)
+ complete(compl);
}
/**
@@ -2833,78 +2881,41 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
EXPORT_SYMBOL(target_show_cmd);
/**
- * target_sess_cmd_list_set_waiting - Flag all commands in
- * sess_cmd_list to complete cmd_wait_comp. Set
- * sess_tearing_down so no more commands are queued.
+ * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
* @se_sess: session to flag
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
- int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return;
- }
se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- rc = kref_get_unless_zero(&se_cmd->cmd_kref);
- if (rc) {
- se_cmd->cmd_wait_set = 1;
- spin_lock(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
- } else
- list_del_init(&se_cmd->se_cmd_list);
- }
-
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/**
- * target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * target_wait_for_sess_cmds - Wait for outstanding commands
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
- bool tas;
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- " %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- tas = (se_cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- if (!target_put_sess_cmd(se_cmd)) {
- if (tas)
- target_put_sess_cmd(se_cmd);
- }
-
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- se_cmd->se_tfo->release_cmd(se_cmd);
- }
+ struct se_cmd *cmd;
+ int ret;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- WARN_ON(!list_empty(&se_sess->sess_cmd_list));
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ WARN_ON_ONCE(!se_sess->sess_tearing_down);
+ spin_lock_irq(&se_sess->sess_cmd_lock);
+ do {
+ ret = wait_event_interruptible_lock_irq_timeout(
+ se_sess->cmd_list_wq,
+ list_empty(&se_sess->sess_cmd_list),
+ se_sess->sess_cmd_lock, 180 * HZ);
+ list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ target_show_cmd("session shutdown: still waiting for ",
+ cmd);
+ } while (ret <= 0);
+ spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
@@ -3166,12 +3177,23 @@ static const struct sense_info sense_info_table[] = {
},
};
-static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ * be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
+ */
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
const struct sense_info *si;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
- u8 asc, ascq;
+ u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
@@ -3180,9 +3202,13 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
si = &sense_info_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+ key = si->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
- core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- WARN_ON_ONCE(asc == 0);
+ if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
+ &ascq)) {
+ cmd->scsi_status = SAM_STAT_BUSY;
+ return;
+ }
} else if (si->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
@@ -3192,13 +3218,14 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
ascq = si->ascq;
}
- scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
if (si->add_sector_info)
- return scsi_set_sense_information(buffer,
- cmd->scsi_sense_length,
- cmd->bad_sector);
-
- return 0;
+ WARN_ON_ONCE(scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->bad_sector) < 0);
}
int
@@ -3215,16 +3242,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!from_transport) {
- int rc;
-
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
- rc = translate_sense_reason(cmd, reason);
- if (rc)
- return rc;
- }
+ if (!from_transport)
+ translate_sense_reason(cmd, reason);
trace_target_cmd_complete(cmd);
return cmd->se_tfo->queue_status(cmd);
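Two waiting mechanisms change shape above. The per-command cmd_wait_comp is gone: transport_generic_free_cmd() now publishes an on-stack completion through cmd->compl for the aborted case only, and target_release_cmd_kref() fires it after ->release_cmd(), so the waiter's stack frame outlives the final reference drop no matter which thread performs it. Condensed from the hunks above:

    DECLARE_COMPLETION_ONSTACK(compl);

    if (aborted)
            cmd->compl = &compl;     /* seen by target_release_cmd_kref() */
    if (!aborted || tas)
            ret = target_put_sess_cmd(cmd);
    if (aborted) {
            wait_for_completion(&compl); /* TMR code drops the last ref */
            ret = 1;
    }

Session drain likewise stops walking a private sess_wait_list and instead waits on cmd_list_wq until sess_cmd_list empties; wait_event_interruptible_lock_irq_timeout() re-checks the condition under sess_cmd_lock, so the wake-up from the kref release path cannot be lost, and the 180 * HZ timeout only serves to dump still-pending commands via target_show_cmd().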
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index be25eb807a5f..c8ac242ce888 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -55,7 +55,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
rcu_read_unlock();
return 0;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return 0;
}
@@ -154,7 +154,6 @@ int core_scsi3_ua_allocate(
&deve->ua_list);
spin_unlock(&deve->ua_lock);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -164,7 +163,6 @@ int core_scsi3_ua_allocate(
" 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
- atomic_inc_mb(&deve->ua_count);
return 0;
}
@@ -196,16 +194,17 @@ void core_scsi3_ua_release_all(
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
}
-void core_scsi3_ua_for_check_condition(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
+/*
+ * Dequeue a unit attention from the unit attention list. This function
+ * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have
+ * been set.
+ */
+bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
+ u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
@@ -214,23 +213,23 @@ void core_scsi3_ua_for_check_condition(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!sess)
- return;
+ if (WARN_ON_ONCE(!sess))
+ return false;
nacl = sess->se_node_acl;
- if (!nacl)
- return;
+ if (WARN_ON_ONCE(!nacl))
+ return false;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
if (!deve) {
rcu_read_unlock();
- return;
- }
- if (!atomic_read(&deve->ua_count)) {
- rcu_read_unlock();
- return;
+ *key = ILLEGAL_REQUEST;
+ *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */
+ *ascq = 0;
+ return true;
}
+ *key = UNIT_ATTENTION;
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
@@ -260,8 +259,6 @@ void core_scsi3_ua_for_check_condition(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
@@ -273,6 +270,8 @@ void core_scsi3_ua_for_check_condition(
(dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+
+ return head == 0;
}
int core_scsi3_ua_clear_for_request_sense(
@@ -299,7 +298,7 @@ int core_scsi3_ua_clear_for_request_sense(
rcu_read_unlock();
return -EINVAL;
}
- if (!atomic_read(&deve->ua_count)) {
+ if (list_empty_careful(&deve->ua_list)) {
rcu_read_unlock();
return -EPERM;
}
@@ -322,8 +321,6 @@ int core_scsi3_ua_clear_for_request_sense(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec_mb(&deve->ua_count);
}
spin_unlock(&deve->ua_lock);
rcu_read_unlock();
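With the atomic ua_count gone, the unlocked fast paths test list_empty_careful() instead. That check is only a hint: a racing add can make it return a stale answer, which is harmless here because it merely decides whether to bother taking ua_lock, and the authoritative list walk still happens under the lock. The shape of the idiom:

    /* Lock-free fast path: skip the locked section when the list is
     * (probably) empty; correctness never depends on this answer. */
    if (list_empty_careful(&deve->ua_list))
            return 0;

    spin_lock(&deve->ua_lock);
    list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
            /* dequeue the highest-priority unit attention */
    }
    spin_unlock(&deve->ua_lock);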
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index b0f4205a96cd..76487c9be090 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -37,7 +37,8 @@ extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
-extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *,
+ u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index d8dc3d22051f..9cd404acdb82 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -83,14 +83,10 @@
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)
-#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
-/* The total size of the ring is 8M + 256K * PAGE_SIZE */
-#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
-
/*
* Default number of global data blocks(512K * PAGE_SIZE)
* when the unmap thread will be started.
@@ -98,6 +94,7 @@
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
+static u8 tcmu_netlink_blocked;
static struct device *tcmu_root_device;
@@ -107,9 +104,16 @@ struct tcmu_hba {
#define TCMU_CONFIG_LEN 256
+static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
+static LIST_HEAD(tcmu_nl_cmd_list);
+
+struct tcmu_dev;
+
struct tcmu_nl_cmd {
/* wake up thread waiting for reply */
struct completion complete;
+ struct list_head nl_list;
+ struct tcmu_dev *udev;
int cmd;
int status;
};
@@ -133,7 +137,7 @@ struct tcmu_dev {
struct inode *inode;
struct tcmu_mailbox *mb_addr;
- size_t dev_size;
+ uint64_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
@@ -161,10 +165,7 @@ struct tcmu_dev {
struct list_head timedout_entry;
- spinlock_t nl_cmd_lock;
struct tcmu_nl_cmd curr_nl_cmd;
- /* wake up threads waiting on curr_nl_cmd */
- wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
@@ -255,6 +256,92 @@ MODULE_PARM_DESC(global_max_data_area_mb,
"Max MBs allowed to be allocated to all the tcmu device's "
"data areas.");
+static int tcmu_get_block_netlink(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
+ "blocked" : "unblocked");
+}
+
+static int tcmu_set_block_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_netlink_blocked = val;
+ return 0;
+}
+
+static const struct kernel_param_ops tcmu_block_netlink_op = {
+ .set = tcmu_set_block_netlink,
+ .get = tcmu_get_block_netlink,
+};
+
+module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
+
+static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
+{
+ struct tcmu_dev *udev = nl_cmd->udev;
+
+ if (!tcmu_netlink_blocked) {
+ pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
+ return -EBUSY;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
+ nl_cmd->status = -EINTR;
+ list_del(&nl_cmd->nl_list);
+ complete(&nl_cmd->complete);
+ }
+ return 0;
+}
+
+static int tcmu_set_reset_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1) {
+ pr_err("Invalid reset netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
+ ret = tcmu_fail_netlink_cmd(nl_cmd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static const struct kernel_param_ops tcmu_reset_netlink_op = {
+ .set = tcmu_set_reset_netlink,
+};
+
+module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
+MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
+
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
@@ -274,48 +361,50 @@ static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
- struct se_device *dev;
- struct tcmu_dev *udev;
+ struct tcmu_dev *udev = NULL;
struct tcmu_nl_cmd *nl_cmd;
int dev_id, rc, ret = 0;
- bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
- return -EINVAL;
+ return -EINVAL;
}
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
- dev = target_find_device(dev_id, !is_removed);
- if (!dev) {
- printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
- completed_cmd, rc, dev_id);
- return -ENODEV;
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
+ if (nl_cmd->udev->se_dev.dev_index == dev_id) {
+ udev = nl_cmd->udev;
+ break;
+ }
}
- udev = TCMU_DEV(dev);
- spin_lock(&udev->nl_cmd_lock);
- nl_cmd = &udev->curr_nl_cmd;
+ if (!udev) {
+ pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ list_del(&nl_cmd->nl_list);
- pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
- nl_cmd->cmd, completed_cmd, rc);
+ pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
+ udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
+ nl_cmd->status);
if (nl_cmd->cmd != completed_cmd) {
- printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
- completed_cmd, nl_cmd->cmd);
+ pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
+ udev->name, completed_cmd, nl_cmd->cmd);
ret = -EINVAL;
- } else {
- nl_cmd->status = rc;
+ goto unlock;
}
- spin_unlock(&udev->nl_cmd_lock);
- if (!is_removed)
- target_undepend_item(&dev->dev_group.cg_item);
- if (!ret)
- complete(&nl_cmd->complete);
+ nl_cmd->status = rc;
+ complete(&nl_cmd->complete);
+unlock:
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -982,7 +1071,6 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
&udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
@@ -1282,6 +1370,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->max_blocks = DATA_BLOCK_BITS_DEF;
mutex_init(&udev->cmdr_lock);
+ INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
@@ -1289,9 +1378,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- init_waitqueue_head(&udev->nl_cmd_wq);
- spin_lock_init(&udev->nl_cmd_lock);
-
INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
return &udev->se_dev;
@@ -1565,38 +1651,48 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
return 0;
}
-static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
if (!tcmu_kern_cmd_reply_supported)
- return;
+ return 0;
if (udev->nl_reply_supported <= 0)
- return;
+ return 0;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
-relock:
- spin_lock(&udev->nl_cmd_lock);
+ if (tcmu_netlink_blocked) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
+ udev->name);
+ return -EAGAIN;
+ }
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
- spin_unlock(&udev->nl_cmd_lock);
- pr_debug("sleeping for open nl cmd\n");
- wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
- goto relock;
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("netlink cmd %d already executing on %s\n",
+ nl_cmd->cmd, udev->name);
+ return -EBUSY;
}
memset(nl_cmd, 0, sizeof(*nl_cmd));
nl_cmd->cmd = cmd;
+ nl_cmd->udev = udev;
init_completion(&nl_cmd->complete);
+ INIT_LIST_HEAD(&nl_cmd->nl_list);
+
+ list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
- spin_unlock(&udev->nl_cmd_lock);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return 0;
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
int ret;
- DEFINE_WAIT(__wait);
if (!tcmu_kern_cmd_reply_supported)
return 0;
@@ -1607,13 +1703,10 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
- spin_lock(&udev->nl_cmd_lock);
+ mutex_lock(&tcmu_nl_cmd_mutex);
nl_cmd->cmd = TCMU_CMD_UNSPEC;
ret = nl_cmd->status;
- nl_cmd->status = 0;
- spin_unlock(&udev->nl_cmd_lock);
-
- wake_up_all(&udev->nl_cmd_wq);
+ mutex_unlock(&tcmu_nl_cmd_mutex);
return ret;
}
@@ -1657,19 +1750,21 @@ free_skb:
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
enum tcmu_genl_cmd cmd,
- struct sk_buff **buf, void **hdr)
+ struct sk_buff *skb, void *msg_header)
{
- int ret = 0;
- struct sk_buff *skb = *buf;
- void *msg_header = *hdr;
+ int ret;
genlmsg_end(skb, msg_header);
- tcmu_init_genl_cmd_reply(udev, cmd);
+ ret = tcmu_init_genl_cmd_reply(udev, cmd);
+ if (ret) {
+ nlmsg_free(skb);
+ return ret;
+ }
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
@@ -1687,9 +1782,8 @@ static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
&msg_header);
if (ret < 0)
return ret;
- return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
- &msg_header);
-
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
+ msg_header);
}
static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
@@ -1703,7 +1797,7 @@ static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
if (ret < 0)
return ret;
return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
@@ -1745,9 +1839,11 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
+ mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
sizeof(unsigned long),
GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
@@ -1842,11 +1938,6 @@ err_bitmap_alloc:
return ret;
}
-static bool tcmu_dev_configured(struct tcmu_dev *udev)
-{
- return udev->uio_info.uio_dev ? true : false;
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1953,45 +2044,76 @@ enum {
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
- {Opt_dev_size, "dev_size=%u"},
- {Opt_hw_block_size, "hw_block_size=%u"},
- {Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_dev_size, "dev_size=%s"},
+ {Opt_hw_block_size, "hw_block_size=%d"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
- unsigned long tmp_ul;
- char *arg_p;
- int ret;
+ int val, ret;
- arg_p = match_strdup(arg);
- if (!arg_p)
- return -ENOMEM;
-
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
+ ret = match_int(arg, &val);
if (ret < 0) {
- pr_err("kstrtoul() failed for dev attrib\n");
+ pr_err("match_int() failed for dev attrib. Error %d.\n",
+ ret);
return ret;
}
- if (!tmp_ul) {
- pr_err("dev attrib must be nonzero\n");
+
+ if (val <= 0) {
+ pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
+ val);
return -EINVAL;
}
- *dev_attrib = tmp_ul;
+ *dev_attrib = val;
return 0;
}
+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- char *orig, *ptr, *opts, *arg_p;
+ char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token, tmpval;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -2014,15 +2136,10 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
break;
case Opt_dev_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
- kfree(arg_p);
+ ret = match_u64(&args[0], &udev->dev_size);
if (ret < 0)
- pr_err("kstrtoul() failed for dev_size=\n");
+ pr_err("match_u64() failed for dev_size=. Error %d.\n",
+ ret);
break;
case Opt_hw_block_size:
ret = tcmu_set_dev_attrib(&args[0],
@@ -2033,48 +2150,13 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
&(dev->dev_attrib.hw_max_sectors));
break;
case Opt_nl_reply_supported:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
- kfree(arg_p);
+ ret = match_int(&args[0], &udev->nl_reply_supported);
if (ret < 0)
- pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
+ ret);
break;
case Opt_max_data_area_mb:
- if (dev->export_count) {
- pr_err("Unable to set max_data_area_mb while exports exist\n");
- ret = -EINVAL;
- break;
- }
-
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &tmpval);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoint() failed for max_data_area_mb=\n");
- break;
- }
-
- if (tmpval <= 0) {
- pr_err("Invalid max_data_area %d\n", tmpval);
- ret = -EINVAL;
- break;
- }
-
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- tmpval,
- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
- }
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
default:
break;
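/*
 * [Editor's sketch] The parser loop these case labels live in (unchanged
 * by the patch, reproduced from context with hypothetical demo_ naming so
 * the conversions above read in order): duplicate the configfs page,
 * split on commas/newlines, classify each option against the tokens[]
 * table at the top of this hunk, then convert with the typed helper.
 */
static ssize_t demo_set_dev_params(struct tcmu_dev *udev, const char *page)
{
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int token, ret = 0;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;
        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;
                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_max_data_area_mb:
                        ret = tcmu_set_max_blocks_param(udev, &args[0]);
                        break;
                default:
                        break;
                }
                if (ret)
                        break;
        }
        kfree(orig);
        return ret;
}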
@@ -2095,7 +2177,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
- bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
+ bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
TCMU_BLOCKS_TO_MBS(udev->max_blocks));
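/*
 * [Editor's note] dev_size is now a u64, hence match_u64() above and the
 * "%zu" -> "%llu" format changes in this and the following hunks. On a
 * 32-bit build size_t (and the kstrtoul() that used to fill it) tops out
 * at 4 GiB, so the old field could not represent larger backstores; u64
 * keeps the full size on every architecture. Hypothetical excerpt:
 */
struct demo_tcmu_dev {
        u64 dev_size;   /* was size_t; parse with match_u64(), print with %llu */
};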
@@ -2222,7 +2304,7 @@ static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
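/*
 * [Editor's sketch] The reconfigure-event helpers now take the sk_buff
 * and genlmsg header by value: tcmu_netlink_event_send() consumes the
 * skb (sends it or frees it), so there is nothing to pass back through a
 * double pointer. Assumed call pattern; tcmu_netlink_event_init() is the
 * companion helper not shown in this hunk.
 */
static int demo_send_cfg_event(struct tcmu_dev *udev, const char *cfg)
{
        struct sk_buff *skb = NULL;
        void *msg_header = NULL;
        int ret;

        ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
                                      &skb, &msg_header);
        if (ret < 0)
                return ret;

        ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, cfg);
        if (ret < 0) {
                nlmsg_free(skb);
                return ret;
        }

        /* Ownership of skb transfers here, success or failure. */
        return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
                                       skb, msg_header);
}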
@@ -2239,7 +2321,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
return -EINVAL;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2264,7 +2346,7 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
+ return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
@@ -2284,7 +2366,7 @@ static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
@@ -2301,7 +2383,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2366,7 +2448,7 @@ static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
return ret;
}
return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
- &skb, &msg_header);
+ skb, msg_header);
}
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
@@ -2383,7 +2465,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
return ret;
/* Check if device has been configured before */
- if (tcmu_dev_configured(udev)) {
+ if (target_dev_configured(&udev->se_dev)) {
ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
@@ -2419,6 +2501,11 @@ static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2446,6 +2533,11 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
u8 val;
int ret;
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
ret = kstrtou8(page, 0, &val);
if (ret < 0)
return ret;
@@ -2510,6 +2602,11 @@ static void find_free_blocks(void)
list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
/* Try to complete the finished commands first */
tcmu_handle_completions(udev);
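/*
 * [Editor's sketch] target_dev_configured() replaces the driver-private
 * tcmu_dev_configured() so every backend can ask the core the same
 * question. Judging by the DF_CONFIGURED flag the core already keeps,
 * the helper presumably reduces to a flag test along these lines:
 */
static inline bool demo_target_dev_configured(struct se_device *se_dev)
{
        return !!(se_dev->dev_flags & DF_CONFIGURED);
}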
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 9ee89e00cd77..2718a933c0c6 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -497,10 +497,7 @@ int target_xcopy_setup_pt(void)
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
- INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
- spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
+ transport_init_session(&xcopy_pt_sess);
xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
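/*
 * [Editor's sketch] transport_init_session() now initializes a caller-
 * allocated struct se_session, so the statically allocated xcopy_pt_sess
 * no longer needs open-coded setup. From the four lines it replaces
 * here, the helper performs at least the following (demo_ name is
 * hypothetical; the upstream body may do more):
 */
void demo_transport_init_session(struct se_session *se_sess)
{
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
}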
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ec372860106f..a183d4da7db2 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -28,7 +28,6 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
-#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
@@ -92,7 +91,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
if (fr_seq(fp))
fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
- percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
ft_sess_put(sess); /* undo get from lookup at recv */
}
@@ -448,9 +447,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
- int tag;
+ int tag, cpu;
- tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
if (tag < 0)
goto busy;
@@ -458,10 +457,11 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
memset(cmd, 0, sizeof(struct ft_cmd));
cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
- percpu_ida_free(&se_sess->sess_tag_pool, tag);
+ target_free_tag(se_sess, &cmd->se_cmd);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
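/*
 * [Editor's sketch] The per-session tag pool moved from percpu_ida to
 * sbitmap_queue. sbitmap_queue_get() reports the CPU the tag was drawn
 * on, which the command must carry in map_cpu so the free side can
 * return the tag to the right per-CPU cache; target_free_tag()
 * presumably wraps sbitmap_queue_clear() along these lines:
 */
static inline void demo_free_tag(struct se_session *sess, struct se_cmd *cmd)
{
        sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu);
}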
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 42ee91123dca..e55c4d537592 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -223,10 +223,7 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
/*
* local_port port_group (tpg) ops.
*/
-static struct se_portal_group *ft_add_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
{
struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
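/*
 * [Editor's note] ft_add_tpg() drops its unused struct config_group
 * argument, implying the core's ->fabric_make_tpg() method was narrowed
 * to match. Assumed shape of the callback after this series:
 */
struct demo_fabric_ops {
        struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *wwn,
                                                   const char *name);
};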
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index c91979c1463d..6d4adf5ec26c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -239,7 +239,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess->tport = tport;
sess->port_id = port_id;
- sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+ sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
@@ -287,7 +287,6 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
static void ft_close_sess(struct ft_sess *sess)
{
- transport_deregister_session_configfs(sess->se_sess);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ft_sess_put(sess);
@@ -448,7 +447,7 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
kfree_rcu(sess, rcu);
}